/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

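/* Taken together, the two helpers above say: a CPU write needs a clflush
 * only when the object is not already in the CPU write domain and either
 * the cache mode is incoherent (uncached on a non-LLC platform) or the
 * object is pinned for the display engine, which never snoops the CPU
 * cache.
 */
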
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

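/* The two helpers above manage a temporary page-sized window in the
 * mappable GGTT. The pread/pwrite fallback paths below use such a node,
 * when the whole object cannot be pinned in the aperture, to map and copy
 * one page at a time via ggtt->base.insert_page().
 */
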
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

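/* Note the three-way result above: 0 when no reset is pending or it
 * completed in time, -EIO when we gave up after 10s, and a negative
 * errno (a signal interrupted the wait) that is propagated so the
 * syscall can be restarted.
 */
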
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

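/* A minimal userspace sketch of the ioctl above (hypothetical caller,
 * assuming an open DRM fd and libdrm's drmIoctl(); not part of this file):
 *
 *	struct drm_i915_gem_get_aperture info = {};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &info) == 0)
 *		printf("aperture: %llu bytes, %llu available\n",
 *		       (unsigned long long)info.aper_size,
 *		       (unsigned long long)info.aper_available_size);
 */
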
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

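/* Attaching a physically contiguous ("phys") backing store swaps obj->ops
 * to the table above, so the generic get_pages/put_pages entry points
 * transparently copy between the shmem pages and the drm_pci_alloc()
 * block; see i915_gem_object_attach_phys() below.
 */
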
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

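/* The still_in_list shuffle above lets the loop always take the first
 * entry of obj->vma_list without revisiting a vma it has already tried:
 * each vma is moved onto a private list before unbinding, and whatever
 * remains is spliced back onto obj->vma_list before returning.
 */
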
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for just read access or read-write access
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct reservation_object *resv;
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!readonly) {
		active = obj->last_read;
		active_mask = i915_gem_object_get_active(obj);
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait(&active[idx],
					   &obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long err;

		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
							  MAX_SCHEDULE_TIMEOUT);
		if (err < 0)
			return err;
	}

	return 0;
}

/* A nonblocking variant of the above wait. Must be called prior to
 * acquiring the mutex for the object, as the object state may change
 * during this call. A reference must be held by the caller for the object.
 */
static __must_check int
__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
			struct intel_rps_client *rps,
			bool readonly)
{
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	active_mask = __I915_BO_ACTIVE(obj);
	if (!active_mask)
		return 0;

	if (!readonly) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait_unlocked(&active[idx],
						    true, NULL, rps);
		if (ret)
			return ret;
	}

	return 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

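/* Both i915_gem_dumb_create() and i915_gem_create_ioctl() below funnel
 * into i915_gem_create() above: the size is rounded up to a whole page,
 * the backing object is allocated, and the creation reference is dropped
 * once the freshly created handle owns the object.
 */
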
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

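/* Worked example for the swizzled copies above: with bit-17 swizzling the
 * hardware swaps the two 64-byte halves of every 128-byte span, so linear
 * GPU offset 0x40 corresponds to CPU offset 0x00 (0x40 ^ 64) and vice
 * versa. Limiting each chunk to the end of a 64-byte cacheline keeps it
 * entirely on one side of the swap.
 */
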
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = 1;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

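/* A hedged sketch of the prepare/finish calling convention used by the
 * pread/pwrite paths below (CLFLUSH_BEFORE/CLFLUSH_AFTER are the flag
 * bits reported through *needs_clflush):
 *
 *	unsigned int needs_clflush;
 *	int ret;
 *
 *	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
 *	if (ret)
 *		return ret;
 *	... copy into the pinned pages, clflushing partially written
 *	... cachelines first if (needs_clflush & CLFLUSH_BEFORE) and the
 *	... dirtied range afterwards if (needs_clflush & CLFLUSH_AFTER)
 *	i915_gem_obj_finish_shmem_access(obj);
 */
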
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	uint64_t unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}

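/* slow_user_access() maps with the non-atomic io_mapping_map_wc(), so the
 * user copy may fault and sleep; callers below therefore drop struct_mutex
 * around it. Contrast fast_user_write() further down, which maps
 * atomically and must bail to this path on the first fault.
 */
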
static int
i915_gem_gtt_pread(struct drm_device *dev,
		   struct drm_i915_gem_object *obj, uint64_t size,
		   uint64_t data_offset, uint64_t data_ptr)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma;
	struct drm_mm_node node;
	char __user *user_data;
	uint64_t remain;
	uint64_t offset;
	int ret;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	user_data = u64_to_user_ptr(data_ptr);
	remain = size;
	offset = data_offset;

	mutex_unlock(&dev->struct_mutex);
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_writeable(user_data, remain);
		if (ret) {
			mutex_lock(&dev->struct_mutex);
			goto out_unpin;
		}
	}

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start,
					       I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* This is a slow read/write as it tries to read from
		 * and write to user memory, which may result in page
		 * faults, and so we cannot perform this under struct_mutex.
		 */
		if (slow_user_access(&ggtt->mappable, page_base,
				     page_offset, user_data,
				     page_length, false)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&dev->struct_mutex);
	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
		/* The user has modified the object whilst we tried
		 * reading from it, and we now have no idea what domain
		 * the pages should be in. As we have just been touching
		 * them directly, flush everything back to the GTT
		 * domain.
		 */
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
	}

out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out:
	return ret;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_obj_finish_shmem_access(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	ret = i915_gem_shmem_pread(dev, obj, args, file);

	/* pread for non-shmem backed objects */
	if (ret == -EFAULT || ret == -ENODEV) {
		intel_runtime_pm_get(to_i915(dev));
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					 args->offset, args->data_ptr);
		intel_runtime_pm_put(to_i915(dev));
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}

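/* pread strategy in short: try the cached shmem path first; if the object
 * has no struct pages (-ENODEV) or the fast path faults (-EFAULT), retry
 * through a GGTT mapping with runtime pm held for the hardware access.
 */
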
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

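/* Because fast_user_write() takes the mapping with
 * io_mapping_map_atomic_wc(), no page fault may be serviced inside it;
 * __copy_from_user_inatomic_nocache() simply returns the number of bytes
 * left uncopied and the caller falls back to slow_user_access() after
 * dropping struct_mutex.
 */
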
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @i915: i915 device private data
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma;
	struct drm_mm_node node;
	uint64_t remain, offset;
	char __user *user_data;
	int ret;
	bool hit_slow_path = false;

	if (i915_gem_object_is_tiled(obj))
		return -EFAULT;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = true;

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry with the
		 * path that handles page faults.
		 */
		if (fast_user_write(&ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
			if (slow_user_access(&ggtt->mappable,
					     page_base,
					     page_offset, user_data,
					     page_length, true)) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto out_flush;
			}

			mutex_lock(&dev->struct_mutex);
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	if (hit_slow_path) {
		if (ret == 0 &&
		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
			/* The user has modified the object whilst we tried
			 * reading from it, and we now have no idea what domain
			 * the pages should be in. As we have just been touching
			 * them directly, flush everything back to the GTT
			 * domain.
			 */
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
		}
	}

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

Eric Anholt40123c12009-03-09 13:42:30 -07001306static int
Daniel Vettere244a442012-03-25 19:47:28 +02001307i915_gem_shmem_pwrite(struct drm_device *dev,
1308 struct drm_i915_gem_object *obj,
1309 struct drm_i915_gem_pwrite *args,
1310 struct drm_file *file)
Eric Anholt40123c12009-03-09 13:42:30 -07001311{
Eric Anholt40123c12009-03-09 13:42:30 -07001312 ssize_t remain;
Daniel Vetter8c599672011-12-14 13:57:31 +01001313 loff_t offset;
1314 char __user *user_data;
Ben Widawskyeb2c0c82012-02-15 14:42:43 +01001315 int shmem_page_offset, page_length, ret = 0;
Daniel Vetter8c599672011-12-14 13:57:31 +01001316 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
Daniel Vettere244a442012-03-25 19:47:28 +02001317 int hit_slowpath = 0;
Chris Wilson43394c72016-08-18 17:16:47 +01001318 unsigned int needs_clflush;
Imre Deak67d5a502013-02-18 19:28:02 +02001319 struct sg_page_iter sg_iter;
Eric Anholt40123c12009-03-09 13:42:30 -07001320
Chris Wilson43394c72016-08-18 17:16:47 +01001321 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1322 if (ret)
1323 return ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001324
Daniel Vetter8c599672011-12-14 13:57:31 +01001325 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
Chris Wilson43394c72016-08-18 17:16:47 +01001326 user_data = u64_to_user_ptr(args->data_ptr);
Eric Anholt40123c12009-03-09 13:42:30 -07001327 offset = args->offset;
Chris Wilson43394c72016-08-18 17:16:47 +01001328 remain = args->size;
Eric Anholt40123c12009-03-09 13:42:30 -07001329
Imre Deak67d5a502013-02-18 19:28:02 +02001330 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1331 offset >> PAGE_SHIFT) {
Imre Deak2db76d72013-03-26 15:14:18 +02001332 struct page *page = sg_page_iter_page(&sg_iter);
Daniel Vetter58642882012-03-25 19:47:37 +02001333 int partial_cacheline_write;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001334
Chris Wilson9da3da62012-06-01 15:20:22 +01001335 if (remain <= 0)
1336 break;
1337
Eric Anholt40123c12009-03-09 13:42:30 -07001338 /* Operation in this page
1339 *
Eric Anholt40123c12009-03-09 13:42:30 -07001340 * shmem_page_offset = offset within page in shmem file
Eric Anholt40123c12009-03-09 13:42:30 -07001341 * page_length = bytes to copy for this page
1342 */
Chris Wilsonc8cbbb82011-05-12 22:17:11 +01001343 shmem_page_offset = offset_in_page(offset);
Eric Anholt40123c12009-03-09 13:42:30 -07001344
1345 page_length = remain;
1346 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1347 page_length = PAGE_SIZE - shmem_page_offset;
Eric Anholt40123c12009-03-09 13:42:30 -07001348
Daniel Vetter58642882012-03-25 19:47:37 +02001349 /* If we don't overwrite a cacheline completely we need to be
1350 * careful to have up-to-date data by first clflushing. Don't
1351 * overcomplicate things and flush the entire page. */
Chris Wilson43394c72016-08-18 17:16:47 +01001352 partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
Daniel Vetter58642882012-03-25 19:47:37 +02001353 ((shmem_page_offset | page_length)
1354 & (boot_cpu_data.x86_clflush_size - 1));
1355
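		/* On swizzled platforms, whether this page needs the bit17
		 * fixup depends on bit 17 of its physical address.
		 */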
Daniel Vetter8c599672011-12-14 13:57:31 +01001356 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1357 (page_to_phys(page) & (1 << 17)) != 0;
1358
Daniel Vetterd174bd62012-03-25 19:47:40 +02001359 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1360 user_data, page_do_bit17_swizzling,
1361 partial_cacheline_write,
Chris Wilson43394c72016-08-18 17:16:47 +01001362 needs_clflush & CLFLUSH_AFTER);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001363 if (ret == 0)
1364 goto next_page;
Eric Anholt40123c12009-03-09 13:42:30 -07001365
Daniel Vettere244a442012-03-25 19:47:28 +02001366 hit_slowpath = 1;
Daniel Vettere244a442012-03-25 19:47:28 +02001367 mutex_unlock(&dev->struct_mutex);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001368 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1369 user_data, page_do_bit17_swizzling,
1370 partial_cacheline_write,
Chris Wilson43394c72016-08-18 17:16:47 +01001371 needs_clflush & CLFLUSH_AFTER);
Eric Anholt40123c12009-03-09 13:42:30 -07001372
Daniel Vettere244a442012-03-25 19:47:28 +02001373 mutex_lock(&dev->struct_mutex);
Chris Wilson755d2212012-09-04 21:02:55 +01001374
Chris Wilson755d2212012-09-04 21:02:55 +01001375 if (ret)
Daniel Vetter8c599672011-12-14 13:57:31 +01001376 goto out;
Daniel Vetter8c599672011-12-14 13:57:31 +01001377
Chris Wilson17793c92014-03-07 08:30:36 +00001378next_page:
Eric Anholt40123c12009-03-09 13:42:30 -07001379 remain -= page_length;
Daniel Vetter8c599672011-12-14 13:57:31 +01001380 user_data += page_length;
Eric Anholt40123c12009-03-09 13:42:30 -07001381 offset += page_length;
1382 }
1383
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001384out:
Chris Wilson43394c72016-08-18 17:16:47 +01001385 i915_gem_obj_finish_shmem_access(obj);
Chris Wilson755d2212012-09-04 21:02:55 +01001386
Daniel Vettere244a442012-03-25 19:47:28 +02001387 if (hit_slowpath) {
Daniel Vetter8dcf0152012-11-15 16:53:58 +01001388 /*
1389 * Fixup: Flush cpu caches in case we didn't flush the dirty
1390 * cachelines in-line while writing and the object moved
1391 * out of the cpu write domain while we've dropped the lock.
1392 */
Chris Wilson43394c72016-08-18 17:16:47 +01001393 if (!(needs_clflush & CLFLUSH_AFTER) &&
Daniel Vetter8dcf0152012-11-15 16:53:58 +01001394 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
Chris Wilson000433b2013-08-08 14:41:09 +01001395 if (i915_gem_clflush_object(obj, obj->pin_display))
Chris Wilson43394c72016-08-18 17:16:47 +01001396 needs_clflush |= CLFLUSH_AFTER;
Daniel Vettere244a442012-03-25 19:47:28 +02001397 }
Daniel Vetter8c599672011-12-14 13:57:31 +01001398 }
Eric Anholt40123c12009-03-09 13:42:30 -07001399
Chris Wilson43394c72016-08-18 17:16:47 +01001400 if (needs_clflush & CLFLUSH_AFTER)
Chris Wilsonc0336662016-05-06 15:40:21 +01001401 i915_gem_chipset_flush(to_i915(dev));
Daniel Vetter58642882012-03-25 19:47:37 +02001402
Rodrigo Vivide152b62015-07-07 16:28:51 -07001403 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Eric Anholt40123c12009-03-09 13:42:30 -07001404 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001405}
1406
1407/**
1408 * Writes data to the object referenced by handle.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001409 * @dev: drm device
1410 * @data: ioctl data blob
1411 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001412 *
1413 * On error, the contents of the buffer that were to be modified are undefined.
1414 */
1415int
1416i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001417 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001418{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001419 struct drm_i915_private *dev_priv = to_i915(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001420 struct drm_i915_gem_pwrite *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001421 struct drm_i915_gem_object *obj;
Chris Wilson51311d02010-11-17 09:10:42 +00001422 int ret;
1423
1424 if (args->size == 0)
1425 return 0;
1426
1427 if (!access_ok(VERIFY_READ,
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001428 u64_to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +00001429 args->size))
1430 return -EFAULT;
1431
Jani Nikulad330a952014-01-21 11:24:25 +02001432 if (likely(!i915.prefault_disable)) {
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001433 ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
Xiong Zhang0b74b502013-07-19 13:51:24 +08001434 args->size);
1435 if (ret)
1436 return -EFAULT;
1437 }
Eric Anholt673a3942008-07-30 12:06:12 -07001438
Chris Wilson03ac0642016-07-20 13:31:51 +01001439 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001440 if (!obj)
1441 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001442
Chris Wilson7dcd2492010-09-26 20:21:44 +01001443 /* Bounds check destination. */
Chris Wilson05394f32010-11-08 19:18:58 +00001444 if (args->offset > obj->base.size ||
1445 args->size > obj->base.size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +01001446 ret = -EINVAL;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001447 goto err;
Chris Wilsonce9d4192010-09-26 20:50:05 +01001448 }
1449
Chris Wilsondb53a302011-02-03 11:57:46 +00001450 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1451
Chris Wilson258a5ed2016-08-05 10:14:16 +01001452 ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
1453 if (ret)
1454 goto err;
1455
1456 intel_runtime_pm_get(dev_priv);
1457
1458 ret = i915_mutex_lock_interruptible(dev);
1459 if (ret)
1460 goto err_rpm;
1461
Daniel Vetter935aaa62012-03-25 19:47:35 +02001462 ret = -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -07001463 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1464 * it would end up going through the fenced access, and we'll get
1465 * different detiling behavior between reading and writing.
1466 * pread/pwrite currently are reading and writing from the CPU
1467 * perspective, requiring manual detiling by the client.
1468 */
Chris Wilson6eae0052016-06-20 15:05:52 +01001469 if (!i915_gem_object_has_struct_page(obj) ||
1470 cpu_write_needs_clflush(obj)) {
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301471 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
Daniel Vetter935aaa62012-03-25 19:47:35 +02001472 /* Note that the gtt paths might fail with non-page-backed user
1473 * pointers (e.g. gtt mappings when moving data between
1474 * textures). Fall back to the shmem path in that case. */
Eric Anholt40123c12009-03-09 13:42:30 -07001475 }
Eric Anholt673a3942008-07-30 12:06:12 -07001476
Chris Wilsond1054ee2016-07-16 18:42:36 +01001477 if (ret == -EFAULT || ret == -ENOSPC) {
Chris Wilson6a2c4232014-11-04 04:51:40 -08001478 if (obj->phys_handle)
1479 ret = i915_gem_phys_pwrite(obj, args, file);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301480 else
Chris Wilson43394c72016-08-18 17:16:47 +01001481 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
Chris Wilson6a2c4232014-11-04 04:51:40 -08001482 }
Daniel Vetter5c0480f2011-12-14 13:57:30 +01001483
Chris Wilsonf8c417c2016-07-20 13:31:53 +01001484 i915_gem_object_put(obj);
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001485 mutex_unlock(&dev->struct_mutex);
Imre Deak5d77d9c2014-11-12 16:40:35 +02001486 intel_runtime_pm_put(dev_priv);
1487
Eric Anholt673a3942008-07-30 12:06:12 -07001488 return ret;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001489
1490err_rpm:
1491 intel_runtime_pm_put(dev_priv);
1492err:
1493 i915_gem_object_put_unlocked(obj);
1494 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001495}
1496
Chris Wilsond243ad82016-08-18 17:16:44 +01001497static inline enum fb_op_origin
Chris Wilsonaeecc962016-06-17 14:46:39 -03001498write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1499{
Chris Wilson50349242016-08-18 17:17:04 +01001500 return (domain == I915_GEM_DOMAIN_GTT ?
1501 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001502}
1503
Eric Anholt673a3942008-07-30 12:06:12 -07001504/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001505 * Called when user space prepares to use an object with the CPU, either
1506 * through the mmap ioctl's mapping or a GTT mapping.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001507 * @dev: drm device
1508 * @data: ioctl data blob
1509 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001510 */
1511int
1512i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001513 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001514{
1515 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001516 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001517 uint32_t read_domains = args->read_domains;
1518 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001519 int ret;
1520
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001521 /* Only handle setting domains to types used by the CPU. */
Chris Wilsonb8f90962016-08-05 10:14:07 +01001522 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001523 return -EINVAL;
1524
1525 /* Having something in the write domain implies it's in the read
1526 * domain, and only that read domain. Enforce that in the request.
1527 */
1528 if (write_domain != 0 && read_domains != write_domain)
1529 return -EINVAL;
1530
Chris Wilson03ac0642016-07-20 13:31:51 +01001531 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001532 if (!obj)
1533 return -ENOENT;
Jesse Barnes652c3932009-08-17 13:31:43 -07001534
Chris Wilson3236f572012-08-24 09:35:09 +01001535 /* Try to flush the object off the GPU without holding the lock.
1536 * We will repeat the flush holding the lock in the normal manner
1537 * to catch cases where we are gazumped.
1538 */
Chris Wilsonb8f90962016-08-05 10:14:07 +01001539 ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001540 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001541 goto err;
1542
1543 ret = i915_mutex_lock_interruptible(dev);
1544 if (ret)
1545 goto err;
Chris Wilson3236f572012-08-24 09:35:09 +01001546
Chris Wilson43566de2015-01-02 16:29:29 +05301547 if (read_domains & I915_GEM_DOMAIN_GTT)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001548 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Chris Wilson43566de2015-01-02 16:29:29 +05301549 else
Eric Anholte47c68e2008-11-14 13:35:19 -08001550 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001551
Daniel Vetter031b6982015-06-26 19:35:16 +02001552 if (write_domain != 0)
Chris Wilsonaeecc962016-06-17 14:46:39 -03001553 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
Daniel Vetter031b6982015-06-26 19:35:16 +02001554
Chris Wilsonf8c417c2016-07-20 13:31:53 +01001555 i915_gem_object_put(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001556 mutex_unlock(&dev->struct_mutex);
1557 return ret;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001558
1559err:
1560 i915_gem_object_put_unlocked(obj);
1561 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001562}
1563
1564/**
1565 * Called when user space has done writes to this buffer
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001566 * @dev: drm device
1567 * @data: ioctl data blob
1568 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001569 */
1570int
1571i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001572 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001573{
1574 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001575 struct drm_i915_gem_object *obj;
Chris Wilsonc21724c2016-08-05 10:14:19 +01001576 int err = 0;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001577
Chris Wilson03ac0642016-07-20 13:31:51 +01001578 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonc21724c2016-08-05 10:14:19 +01001579 if (!obj)
1580 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001581
Eric Anholt673a3942008-07-30 12:06:12 -07001582 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilsonc21724c2016-08-05 10:14:19 +01001583 if (READ_ONCE(obj->pin_display)) {
1584 err = i915_mutex_lock_interruptible(dev);
1585 if (!err) {
1586 i915_gem_object_flush_cpu_write_domain(obj);
1587 mutex_unlock(&dev->struct_mutex);
1588 }
1589 }
Eric Anholte47c68e2008-11-14 13:35:19 -08001590
Chris Wilsonc21724c2016-08-05 10:14:19 +01001591 i915_gem_object_put_unlocked(obj);
1592 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07001593}
1594
1595/**
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001596 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1597 * it is mapped to.
1598 * @dev: drm device
1599 * @data: ioctl data blob
1600 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001601 *
1602 * While the mapping holds a reference on the contents of the object, it doesn't
1603 * imply a ref on the object itself.
Daniel Vetter34367382014-10-16 12:28:18 +02001604 *
1605 * IMPORTANT:
1606 *
1607 * DRM driver writers who look a this function as an example for how to do GEM
1608 * mmap support, please don't implement mmap support like here. The modern way
1609 * to implement DRM mmap support is with an mmap offset ioctl (like
1610 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1611 * That way debug tooling like valgrind will understand what's going on; hiding
1612 * the mmap call in a driver-private ioctl will break that. The i915 driver only
1613 * does cpu mmaps this way because we didn't know better.
Eric Anholt673a3942008-07-30 12:06:12 -07001614 */
1615int
1616i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001617 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001618{
1619 struct drm_i915_gem_mmap *args = data;
Chris Wilson03ac0642016-07-20 13:31:51 +01001620 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001621 unsigned long addr;
1622
Akash Goel1816f922015-01-02 16:29:30 +05301623 if (args->flags & ~(I915_MMAP_WC))
1624 return -EINVAL;
1625
Borislav Petkov568a58e2016-03-29 17:42:01 +02001626 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
Akash Goel1816f922015-01-02 16:29:30 +05301627 return -ENODEV;
1628
Chris Wilson03ac0642016-07-20 13:31:51 +01001629 obj = i915_gem_object_lookup(file, args->handle);
1630 if (!obj)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001631 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001632
Daniel Vetter1286ff72012-05-10 15:25:09 +02001633 /* prime objects have no backing filp to GEM mmap
1634 * pages from.
1635 */
Chris Wilson03ac0642016-07-20 13:31:51 +01001636 if (!obj->base.filp) {
Chris Wilson34911fd2016-07-20 13:31:54 +01001637 i915_gem_object_put_unlocked(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001638 return -EINVAL;
1639 }
1640
Chris Wilson03ac0642016-07-20 13:31:51 +01001641 addr = vm_mmap(obj->base.filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001642 PROT_READ | PROT_WRITE, MAP_SHARED,
1643 args->offset);
Akash Goel1816f922015-01-02 16:29:30 +05301644 if (args->flags & I915_MMAP_WC) {
1645 struct mm_struct *mm = current->mm;
1646 struct vm_area_struct *vma;
1647
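		/* Rewrite the freshly created CPU mapping's page protection
		 * to write-combining.
		 */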
Michal Hocko80a89a52016-05-23 16:26:11 -07001648 if (down_write_killable(&mm->mmap_sem)) {
Chris Wilson34911fd2016-07-20 13:31:54 +01001649 i915_gem_object_put_unlocked(obj);
Michal Hocko80a89a52016-05-23 16:26:11 -07001650 return -EINTR;
1651 }
Akash Goel1816f922015-01-02 16:29:30 +05301652 vma = find_vma(mm, addr);
1653 if (vma)
1654 vma->vm_page_prot =
1655 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1656 else
1657 addr = -ENOMEM;
1658 up_write(&mm->mmap_sem);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001659
1660 /* This may race, but that's ok, it only gets set */
Chris Wilson50349242016-08-18 17:17:04 +01001661 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
Akash Goel1816f922015-01-02 16:29:30 +05301662 }
Chris Wilson34911fd2016-07-20 13:31:54 +01001663 i915_gem_object_put_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001664 if (IS_ERR((void *)addr))
1665 return addr;
1666
1667 args->addr_ptr = (uint64_t) addr;
1668
1669 return 0;
1670}
1671
Chris Wilson03af84f2016-08-18 17:17:01 +01001672static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1673{
1674 u64 size;
1675
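	/* A Y-major tile is 32 rows tall and an X-major tile 8 rows, so
	 * stride * rows gives the bytes spanned by one full row of tiles.
	 */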
1676 size = i915_gem_object_get_stride(obj);
1677 size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
1678
1679 return size >> PAGE_SHIFT;
1680}
1681
Jesse Barnesde151cf2008-11-12 10:03:55 -08001682/**
1683 * i915_gem_fault - fault a page into the GTT
Chris Wilson058d88c2016-08-15 10:49:06 +01001684 * @area: CPU VMA in question
Geliang Tangd9072a32015-09-15 05:58:44 -07001685 * @vmf: fault info
Jesse Barnesde151cf2008-11-12 10:03:55 -08001686 *
1687 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1688 * from userspace. The fault handler takes care of binding the object to
1689 * the GTT (if needed), allocating and programming a fence register (again,
1690 * only if needed based on whether the old reg is still valid or the object
1691 * is tiled) and inserting a new PTE into the faulting process.
1692 *
1693 * Note that the faulting process may involve evicting existing objects
1694 * from the GTT and/or fence registers to make room. So performance may
1695 * suffer if the GTT working set is large or there are few fence registers
1696 * left.
1697 */
Chris Wilson058d88c2016-08-15 10:49:06 +01001698int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001699{
Chris Wilson03af84f2016-08-18 17:17:01 +01001700#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
Chris Wilson058d88c2016-08-15 10:49:06 +01001701 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
Chris Wilson05394f32010-11-08 19:18:58 +00001702 struct drm_device *dev = obj->base.dev;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001703 struct drm_i915_private *dev_priv = to_i915(dev);
1704 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001705 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Chris Wilson058d88c2016-08-15 10:49:06 +01001706 struct i915_vma *vma;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001707 pgoff_t page_offset;
Chris Wilson82118872016-08-18 17:17:05 +01001708 unsigned int flags;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001709 int ret;
Paulo Zanonif65c9162013-11-27 18:20:34 -02001710
Jesse Barnesde151cf2008-11-12 10:03:55 -08001711 /* We don't use vmf->pgoff since that has the fake offset */
Chris Wilson058d88c2016-08-15 10:49:06 +01001712 page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
Jesse Barnesde151cf2008-11-12 10:03:55 -08001713 PAGE_SHIFT;
1714
Chris Wilsondb53a302011-02-03 11:57:46 +00001715 trace_i915_gem_object_fault(obj, page_offset, true, write);
1716
Chris Wilson6e4930f2014-02-07 18:37:06 -02001717 /* Try to flush the object off the GPU first without holding the lock.
Chris Wilsonb8f90962016-08-05 10:14:07 +01001718 * Upon acquiring the lock, we will perform our sanity checks and then
Chris Wilson6e4930f2014-02-07 18:37:06 -02001719 * repeat the flush holding the lock in the normal manner to catch cases
1720 * where we are gazumped.
1721 */
Chris Wilsonb8f90962016-08-05 10:14:07 +01001722 ret = __unsafe_wait_rendering(obj, NULL, !write);
Chris Wilson6e4930f2014-02-07 18:37:06 -02001723 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001724 goto err;
1725
1726 intel_runtime_pm_get(dev_priv);
1727
1728 ret = i915_mutex_lock_interruptible(dev);
1729 if (ret)
1730 goto err_rpm;
Chris Wilson6e4930f2014-02-07 18:37:06 -02001731
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001732 /* Access to snoopable pages through the GTT is incoherent. */
1733 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
Chris Wilsonddeff6e2014-05-28 16:16:41 +01001734 ret = -EFAULT;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001735 goto err_unlock;
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001736 }
1737
Chris Wilson82118872016-08-18 17:17:05 +01001738	/* If the object is smaller than a couple of partial-vma chunks, it is
1739	 * not worth creating only a single partial vma - we may as well
1740	 * clear enough space for the full object.
1741 */
1742 flags = PIN_MAPPABLE;
1743 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1744 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1745
Chris Wilsona61007a2016-08-18 17:17:02 +01001746 /* Now pin it into the GTT as needed */
Chris Wilson82118872016-08-18 17:17:05 +01001747 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
Chris Wilsona61007a2016-08-18 17:17:02 +01001748 if (IS_ERR(vma)) {
1749 struct i915_ggtt_view view;
Chris Wilson03af84f2016-08-18 17:17:01 +01001750 unsigned int chunk_size;
1751
Chris Wilsona61007a2016-08-18 17:17:02 +01001752		/* Use a partial view if the object is bigger than the available space */
Chris Wilson03af84f2016-08-18 17:17:01 +01001753 chunk_size = MIN_CHUNK_PAGES;
1754 if (i915_gem_object_is_tiled(obj))
1755 chunk_size = max(chunk_size, tile_row_pages(obj));
Joonas Lahtinene7ded2d2015-05-08 14:37:39 +03001756
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03001757 memset(&view, 0, sizeof(view));
1758 view.type = I915_GGTT_VIEW_PARTIAL;
1759 view.params.partial.offset = rounddown(page_offset, chunk_size);
1760 view.params.partial.size =
Chris Wilsona61007a2016-08-18 17:17:02 +01001761 min_t(unsigned int, chunk_size,
Chris Wilson058d88c2016-08-15 10:49:06 +01001762 (area->vm_end - area->vm_start) / PAGE_SIZE -
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03001763 view.params.partial.offset);
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03001764
Chris Wilsonaa136d92016-08-18 17:17:03 +01001765 /* If the partial covers the entire object, just create a
1766 * normal VMA.
1767 */
1768 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1769 view.type = I915_GGTT_VIEW_NORMAL;
1770
Chris Wilson50349242016-08-18 17:17:04 +01001771		/* Userspace is now writing through an untracked VMA; abandon
1772 * all hope that the hardware is able to track future writes.
1773 */
1774 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1775
Chris Wilsona61007a2016-08-18 17:17:02 +01001776 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1777 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001778 if (IS_ERR(vma)) {
1779 ret = PTR_ERR(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001780 goto err_unlock;
Chris Wilson058d88c2016-08-15 10:49:06 +01001781 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001782
Chris Wilsonc9839302012-11-20 10:45:17 +00001783 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1784 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001785 goto err_unpin;
Chris Wilsonc9839302012-11-20 10:45:17 +00001786
Chris Wilson49ef5292016-08-18 17:17:00 +01001787 ret = i915_vma_get_fence(vma);
Chris Wilsonc9839302012-11-20 10:45:17 +00001788 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001789 goto err_unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001790
Chris Wilsonb90b91d2014-06-10 12:14:40 +01001791 /* Finally, remap it using the new GTT offset */
Chris Wilsonc58305a2016-08-19 16:54:28 +01001792 ret = remap_io_mapping(area,
1793 area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1794 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1795 min_t(u64, vma->size, area->vm_end - area->vm_start),
1796 &ggtt->mappable);
1797 if (ret)
1798 goto err_unpin;
Chris Wilsona61007a2016-08-18 17:17:02 +01001799
1800 obj->fault_mappable = true;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001801err_unpin:
Chris Wilson058d88c2016-08-15 10:49:06 +01001802 __i915_vma_unpin(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001803err_unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001804 mutex_unlock(&dev->struct_mutex);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001805err_rpm:
1806 intel_runtime_pm_put(dev_priv);
1807err:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001808 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001809 case -EIO:
Daniel Vetter2232f032014-09-04 09:36:18 +02001810 /*
1811 * We eat errors when the gpu is terminally wedged to avoid
1812 * userspace unduly crashing (gl has no provisions for mmaps to
1813 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1814 * and so needs to be reported.
1815 */
1816 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
Paulo Zanonif65c9162013-11-27 18:20:34 -02001817 ret = VM_FAULT_SIGBUS;
1818 break;
1819 }
Chris Wilson045e7692010-11-07 09:18:22 +00001820 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001821 /*
1822 * EAGAIN means the gpu is hung and we'll wait for the error
1823 * handler to reset everything when re-faulting in
1824 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001825 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001826 case 0:
1827 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001828 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001829 case -EBUSY:
1830 /*
1831 * EBUSY is ok: this just means that another thread
1832 * already did the job.
1833 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001834 ret = VM_FAULT_NOPAGE;
1835 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001836 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001837 ret = VM_FAULT_OOM;
1838 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001839 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001840 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001841 ret = VM_FAULT_SIGBUS;
1842 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001843 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001844 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001845 ret = VM_FAULT_SIGBUS;
1846 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001847 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001848 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001849}
1850
1851/**
Chris Wilson901782b2009-07-10 08:18:50 +01001852 * i915_gem_release_mmap - remove physical page mappings
1853 * @obj: obj in question
1854 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001855 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001856 * relinquish ownership of the pages back to the system.
1857 *
1858 * It is vital that we remove the page mapping if we have mapped a tiled
1859 * object through the GTT and then lose the fence register due to
1860 * resource pressure. Similarly if the object has been moved out of the
1861 * aperture, then pages mapped into userspace must be revoked. Removing the
1862 * mapping will then trigger a page fault on the next user access, allowing
1863 * fixup by i915_gem_fault().
1864 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001865void
Chris Wilson05394f32010-11-08 19:18:58 +00001866i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001867{
Chris Wilson349f2cc2016-04-13 17:35:12 +01001868 /* Serialisation between user GTT access and our code depends upon
1869 * revoking the CPU's PTE whilst the mutex is held. The next user
1870 * pagefault then has to wait until we release the mutex.
1871 */
1872 lockdep_assert_held(&obj->base.dev->struct_mutex);
1873
Chris Wilson6299f992010-11-24 12:23:44 +00001874 if (!obj->fault_mappable)
1875 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001876
David Herrmann6796cb12014-01-03 14:24:19 +01001877 drm_vma_node_unmap(&obj->base.vma_node,
1878 obj->base.dev->anon_inode->i_mapping);
Chris Wilson349f2cc2016-04-13 17:35:12 +01001879
1880 /* Ensure that the CPU's PTE are revoked and there are not outstanding
1881 * memory transactions from userspace before we return. The TLB
1882 * flushing implied above by changing the PTE above *should* be
1883 * sufficient, an extra barrier here just provides us with a bit
1884 * of paranoid documentation about our requirement to serialise
1885 * memory writes before touching registers / GSM.
1886 */
1887 wmb();
1888
Chris Wilson6299f992010-11-24 12:23:44 +00001889 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001890}
1891
Chris Wilsoneedd10f2014-06-16 08:57:44 +01001892void
1893i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1894{
1895 struct drm_i915_gem_object *obj;
1896
1897 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1898 i915_gem_release_mmap(obj);
1899}
1900
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001901/**
1902 * i915_gem_get_ggtt_size - return required global GTT size for an object
Chris Wilsona9f14812016-08-04 16:32:28 +01001903 * @dev_priv: i915 device
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001904 * @size: object size
1905 * @tiling_mode: tiling mode
1906 *
1907 * Return the required global GTT size for an object, taking into account
1908 * potential fence register mapping.
1909 */
Chris Wilsona9f14812016-08-04 16:32:28 +01001910u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
1911 u64 size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001912{
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001913 u64 ggtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001914
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001915 GEM_BUG_ON(size == 0);
1916
Chris Wilsona9f14812016-08-04 16:32:28 +01001917 if (INTEL_GEN(dev_priv) >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001918 tiling_mode == I915_TILING_NONE)
1919 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001920
1921 /* Previous chips need a power-of-two fence region when tiling */
Chris Wilsona9f14812016-08-04 16:32:28 +01001922 if (IS_GEN3(dev_priv))
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001923 ggtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001924 else
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001925 ggtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001926
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001927 while (ggtt_size < size)
1928 ggtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001929
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001930 return ggtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001931}
1932
Jesse Barnesde151cf2008-11-12 10:03:55 -08001933/**
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001934 * i915_gem_get_ggtt_alignment - return required global GTT alignment
Chris Wilsona9f14812016-08-04 16:32:28 +01001935 * @dev_priv: i915 device
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001936 * @size: object size
1937 * @tiling_mode: tiling mode
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001938 * @fenced: is fenced alignment required or not
Jesse Barnesde151cf2008-11-12 10:03:55 -08001939 *
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001940 * Return the required global GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001941 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001942 */
Chris Wilsona9f14812016-08-04 16:32:28 +01001943u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001944 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001945{
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001946 GEM_BUG_ON(size == 0);
1947
Jesse Barnesde151cf2008-11-12 10:03:55 -08001948 /*
1949 * Minimum alignment is 4k (GTT page size), but might be greater
1950 * if a fence register is needed for the object.
1951 */
Chris Wilsona9f14812016-08-04 16:32:28 +01001952 if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001953 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001954 return 4096;
1955
1956 /*
1957 * Previous chips need to be aligned to the size of the smallest
1958 * fence register that can contain the object.
1959 */
Chris Wilsona9f14812016-08-04 16:32:28 +01001960 return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001961}
1962
Chris Wilsond8cb5082012-08-11 15:41:03 +01001963static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1964{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001965 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsonf3f61842016-08-05 10:14:14 +01001966 int err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001967
Chris Wilsonf3f61842016-08-05 10:14:14 +01001968 err = drm_gem_create_mmap_offset(&obj->base);
1969 if (!err)
1970 return 0;
Daniel Vetterda494d72012-12-20 15:11:16 +01001971
Chris Wilsonf3f61842016-08-05 10:14:14 +01001972 /* We can idle the GPU locklessly to flush stale objects, but in order
1973 * to claim that space for ourselves, we need to take the big
1974 * struct_mutex to free the requests+objects and allocate our slot.
Chris Wilsond8cb5082012-08-11 15:41:03 +01001975 */
Chris Wilsonf3f61842016-08-05 10:14:14 +01001976 err = i915_gem_wait_for_idle(dev_priv, true);
1977 if (err)
1978 return err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001979
Chris Wilsonf3f61842016-08-05 10:14:14 +01001980 err = i915_mutex_lock_interruptible(&dev_priv->drm);
1981 if (!err) {
1982 i915_gem_retire_requests(dev_priv);
1983 err = drm_gem_create_mmap_offset(&obj->base);
1984 mutex_unlock(&dev_priv->drm.struct_mutex);
1985 }
Daniel Vetterda494d72012-12-20 15:11:16 +01001986
Chris Wilsonf3f61842016-08-05 10:14:14 +01001987 return err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001988}
1989
1990static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1991{
Chris Wilsond8cb5082012-08-11 15:41:03 +01001992 drm_gem_free_mmap_offset(&obj->base);
1993}
1994
Dave Airlieda6b51d2014-12-24 13:11:17 +10001995int
Dave Airlieff72145b2011-02-07 12:16:14 +10001996i915_gem_mmap_gtt(struct drm_file *file,
1997 struct drm_device *dev,
Dave Airlieda6b51d2014-12-24 13:11:17 +10001998 uint32_t handle,
Dave Airlieff72145b2011-02-07 12:16:14 +10001999 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002000{
Chris Wilson05394f32010-11-08 19:18:58 +00002001 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002002 int ret;
2003
Chris Wilson03ac0642016-07-20 13:31:51 +01002004 obj = i915_gem_object_lookup(file, handle);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002005 if (!obj)
2006 return -ENOENT;
Chris Wilsonab182822009-09-22 18:46:17 +01002007
Chris Wilsond8cb5082012-08-11 15:41:03 +01002008 ret = i915_gem_object_create_mmap_offset(obj);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002009 if (ret == 0)
2010 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002011
Chris Wilsonf3f61842016-08-05 10:14:14 +01002012 i915_gem_object_put_unlocked(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002013 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002014}
2015
Dave Airlieff72145b2011-02-07 12:16:14 +10002016/**
2017 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2018 * @dev: DRM device
2019 * @data: GTT mapping ioctl data
2020 * @file: GEM object info
2021 *
2022 * Simply returns the fake offset to userspace so it can mmap it.
2023 * The mmap call will end up in drm_gem_mmap(), which will set things
2024 * up so we can get faults in the handler above.
2025 *
2026 * The fault handler will take care of binding the object into the GTT
2027 * (since it may have been evicted to make room for something), allocating
2028 * a fence register, and mapping the appropriate aperture address into
2029 * userspace.
2030 */
2031int
2032i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2033 struct drm_file *file)
2034{
2035 struct drm_i915_gem_mmap_gtt *args = data;
2036
Dave Airlieda6b51d2014-12-24 13:11:17 +10002037 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
Dave Airlieff72145b2011-02-07 12:16:14 +10002038}
2039
Daniel Vetter225067e2012-08-20 10:23:20 +02002040/* Immediately discard the backing storage */
2041static void
2042i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01002043{
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002044 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02002045
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002046 if (obj->base.filp == NULL)
2047 return;
2048
Daniel Vetter225067e2012-08-20 10:23:20 +02002049	/* Our goal here is to return as much of the memory as
2050	 * possible back to the system, as we are called from OOM.
2051 * To do this we must instruct the shmfs to drop all of its
2052 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01002053 */
Chris Wilson55372522014-03-25 13:23:06 +00002054 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
Daniel Vetter225067e2012-08-20 10:23:20 +02002055 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01002056}
Chris Wilsone5281cc2010-10-28 13:45:36 +01002057
Chris Wilson55372522014-03-25 13:23:06 +00002058/* Try to discard unwanted pages */
2059static void
2060i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
Daniel Vetter225067e2012-08-20 10:23:20 +02002061{
Chris Wilson55372522014-03-25 13:23:06 +00002062 struct address_space *mapping;
2063
2064 switch (obj->madv) {
2065 case I915_MADV_DONTNEED:
2066 i915_gem_object_truncate(obj);
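		/* fall through - the object has just been purged */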
2067 case __I915_MADV_PURGED:
2068 return;
2069 }
2070
2071 if (obj->base.filp == NULL)
2072 return;
2073
Al Viro93c76a32015-12-04 23:45:44 -05002074	mapping = obj->base.filp->f_mapping;
Chris Wilson55372522014-03-25 13:23:06 +00002075 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002076}
2077
Chris Wilson5cdf5882010-09-27 15:51:07 +01002078static void
Chris Wilson05394f32010-11-08 19:18:58 +00002079i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002080{
Dave Gordon85d12252016-05-20 11:54:06 +01002081 struct sgt_iter sgt_iter;
2082 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002083 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02002084
Chris Wilson05394f32010-11-08 19:18:58 +00002085 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07002086
Chris Wilson6c085a72012-08-20 11:40:46 +02002087 ret = i915_gem_object_set_to_cpu_domain(obj, true);
Chris Wilsonf4457ae2016-04-13 17:35:08 +01002088 if (WARN_ON(ret)) {
Chris Wilson6c085a72012-08-20 11:40:46 +02002089 /* In the event of a disaster, abandon all caches and
2090 * hope for the best.
2091 */
Chris Wilson2c225692013-08-09 12:26:45 +01002092 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02002093 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2094 }
2095
Imre Deake2273302015-07-09 12:59:05 +03002096 i915_gem_gtt_finish_object(obj);
2097
Daniel Vetter6dacfd22011-09-12 21:30:02 +02002098 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07002099 i915_gem_object_save_bit_17_swizzle(obj);
2100
Chris Wilson05394f32010-11-08 19:18:58 +00002101 if (obj->madv == I915_MADV_DONTNEED)
2102 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01002103
Dave Gordon85d12252016-05-20 11:54:06 +01002104 for_each_sgt_page(page, sgt_iter, obj->pages) {
Chris Wilson05394f32010-11-08 19:18:58 +00002105 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01002106 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002107
Chris Wilson05394f32010-11-08 19:18:58 +00002108 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01002109 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002110
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002111 put_page(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002112 }
Chris Wilson05394f32010-11-08 19:18:58 +00002113 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002114
Chris Wilson9da3da62012-06-01 15:20:22 +01002115 sg_free_table(obj->pages);
2116 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01002117}
2118
Chris Wilsondd624af2013-01-15 12:39:35 +00002119int
Chris Wilson37e680a2012-06-07 15:38:42 +01002120i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2121{
2122 const struct drm_i915_gem_object_ops *ops = obj->ops;
2123
Chris Wilson2f745ad2012-09-04 21:02:58 +01002124 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01002125 return 0;
2126
Chris Wilsona5570172012-09-04 21:02:54 +01002127 if (obj->pages_pin_count)
2128 return -EBUSY;
2129
Chris Wilson15717de2016-08-04 07:52:26 +01002130 GEM_BUG_ON(obj->bind_count);
Ben Widawsky3e123022013-07-31 17:00:04 -07002131
Chris Wilsona2165e32012-12-03 11:49:00 +00002132 /* ->put_pages might need to allocate memory for the bit17 swizzle
2133 * array, hence protect them from being reaped by removing them from gtt
2134 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07002135 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00002136
Chris Wilson0a798eb2016-04-08 12:11:11 +01002137 if (obj->mapping) {
Chris Wilson4b30cb22016-08-18 17:16:42 +01002138 void *ptr;
2139
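		/* The low bits of obj->mapping encode the map type; mask
		 * them off to recover the raw kernel address.
		 */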
2140 ptr = ptr_mask_bits(obj->mapping);
2141 if (is_vmalloc_addr(ptr))
2142 vunmap(ptr);
Chris Wilsonfb8621d2016-04-08 12:11:14 +01002143 else
Chris Wilson4b30cb22016-08-18 17:16:42 +01002144 kunmap(kmap_to_page(ptr));
2145
Chris Wilson0a798eb2016-04-08 12:11:11 +01002146 obj->mapping = NULL;
2147 }
2148
Chris Wilson37e680a2012-06-07 15:38:42 +01002149 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002150 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02002151
Chris Wilson55372522014-03-25 13:23:06 +00002152 i915_gem_object_invalidate(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02002153
2154 return 0;
2155}
2156
Chris Wilson37e680a2012-06-07 15:38:42 +01002157static int
Chris Wilson6c085a72012-08-20 11:40:46 +02002158i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002159{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002160 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002161 int page_count, i;
2162 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01002163 struct sg_table *st;
2164 struct scatterlist *sg;
Dave Gordon85d12252016-05-20 11:54:06 +01002165 struct sgt_iter sgt_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07002166 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002167 unsigned long last_pfn = 0; /* suppress gcc warning */
Imre Deake2273302015-07-09 12:59:05 +03002168 int ret;
Chris Wilson6c085a72012-08-20 11:40:46 +02002169 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07002170
Chris Wilson6c085a72012-08-20 11:40:46 +02002171 /* Assert that the object is not currently in any GPU domain. As it
2172 * wasn't in the GTT, there shouldn't be any way it could have been in
2173 * a GPU cache
2174 */
2175 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2176 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2177
Chris Wilson9da3da62012-06-01 15:20:22 +01002178 st = kmalloc(sizeof(*st), GFP_KERNEL);
2179 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002180 return -ENOMEM;
2181
Chris Wilson9da3da62012-06-01 15:20:22 +01002182 page_count = obj->base.size / PAGE_SIZE;
2183 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01002184 kfree(st);
2185 return -ENOMEM;
2186 }
2187
2188 /* Get the list of pages out of our struct file. They'll be pinned
2189 * at this point until we release them.
2190 *
2191 * Fail silently without starting the shrinker
2192 */
Al Viro93c76a32015-12-04 23:45:44 -05002193 mapping = obj->base.filp->f_mapping;
Michal Hockoc62d2552015-11-06 16:28:49 -08002194 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
Mel Gormand0164ad2015-11-06 16:28:21 -08002195 gfp |= __GFP_NORETRY | __GFP_NOWARN;
Imre Deak90797e62013-02-18 19:28:03 +02002196 sg = st->sgl;
2197 st->nents = 0;
2198 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02002199 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2200 if (IS_ERR(page)) {
Chris Wilson21ab4e72014-09-09 11:16:08 +01002201 i915_gem_shrink(dev_priv,
2202 page_count,
2203 I915_SHRINK_BOUND |
2204 I915_SHRINK_UNBOUND |
2205 I915_SHRINK_PURGEABLE);
Chris Wilson6c085a72012-08-20 11:40:46 +02002206 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2207 }
2208 if (IS_ERR(page)) {
2209 /* We've tried hard to allocate the memory by reaping
2210 * our own buffer, now let the real VM do its job and
2211 * go down in flames if truly OOM.
2212 */
Chris Wilson6c085a72012-08-20 11:40:46 +02002213 i915_gem_shrink_all(dev_priv);
David Herrmannf461d1b2014-05-25 14:34:10 +02002214 page = shmem_read_mapping_page(mapping, i);
Imre Deake2273302015-07-09 12:59:05 +03002215 if (IS_ERR(page)) {
2216 ret = PTR_ERR(page);
Chris Wilson6c085a72012-08-20 11:40:46 +02002217 goto err_pages;
Imre Deake2273302015-07-09 12:59:05 +03002218 }
Chris Wilson6c085a72012-08-20 11:40:46 +02002219 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002220#ifdef CONFIG_SWIOTLB
2221 if (swiotlb_nr_tbl()) {
2222 st->nents++;
2223 sg_set_page(sg, page, PAGE_SIZE, 0);
2224 sg = sg_next(sg);
2225 continue;
2226 }
2227#endif
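		/* Coalesce physically contiguous pages into a single sg
		 * entry; otherwise start a new scatterlist element.
		 */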
Imre Deak90797e62013-02-18 19:28:03 +02002228 if (!i || page_to_pfn(page) != last_pfn + 1) {
2229 if (i)
2230 sg = sg_next(sg);
2231 st->nents++;
2232 sg_set_page(sg, page, PAGE_SIZE, 0);
2233 } else {
2234 sg->length += PAGE_SIZE;
2235 }
2236 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03002237
2238 /* Check that the i965g/gm workaround works. */
2239 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07002240 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002241#ifdef CONFIG_SWIOTLB
2242 if (!swiotlb_nr_tbl())
2243#endif
2244 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01002245 obj->pages = st;
2246
Imre Deake2273302015-07-09 12:59:05 +03002247 ret = i915_gem_gtt_prepare_object(obj);
2248 if (ret)
2249 goto err_pages;
2250
Eric Anholt673a3942008-07-30 12:06:12 -07002251 if (i915_gem_object_needs_bit17_swizzle(obj))
2252 i915_gem_object_do_bit_17_swizzle(obj);
2253
Chris Wilson3e510a82016-08-05 10:14:23 +01002254 if (i915_gem_object_is_tiled(obj) &&
Daniel Vetter656bfa32014-11-20 09:26:30 +01002255 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2256 i915_gem_object_pin_pages(obj);
2257
Eric Anholt673a3942008-07-30 12:06:12 -07002258 return 0;
2259
2260err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02002261 sg_mark_end(sg);
Dave Gordon85d12252016-05-20 11:54:06 +01002262 for_each_sgt_page(page, sgt_iter, st)
2263 put_page(page);
Chris Wilson9da3da62012-06-01 15:20:22 +01002264 sg_free_table(st);
2265 kfree(st);
Chris Wilson0820baf2014-03-25 13:23:03 +00002266
2267 /* shmemfs first checks if there is enough memory to allocate the page
2268 * and reports ENOSPC if there is not, along with the usual
2269 * ENOMEM for a genuine allocation failure.
2270 *
2271 * We use ENOSPC in our driver to mean that we have run out of aperture
2272 * space and so want to translate the error from shmemfs back to our
2273 * usual understanding of ENOMEM.
2274 */
Imre Deake2273302015-07-09 12:59:05 +03002275 if (ret == -ENOSPC)
2276 ret = -ENOMEM;
2277
2278 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002279}
2280
Chris Wilson37e680a2012-06-07 15:38:42 +01002281/* Ensure that the associated pages are gathered from the backing storage
2282 * and pinned into our object. i915_gem_object_get_pages() may be called
2283 * multiple times before they are released by a single call to
2284 * i915_gem_object_put_pages() - once the pages are no longer referenced
2285 * either as a result of memory pressure (reaping pages under the shrinker)
2286 * or as the object is itself released.
2287 */
2288int
2289i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2290{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002291 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilson37e680a2012-06-07 15:38:42 +01002292 const struct drm_i915_gem_object_ops *ops = obj->ops;
2293 int ret;
2294
Chris Wilson2f745ad2012-09-04 21:02:58 +01002295 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01002296 return 0;
2297
Chris Wilson43e28f02013-01-08 10:53:09 +00002298 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00002299 DRM_DEBUG("Attempting to obtain a purgeable object\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00002300 return -EFAULT;
Chris Wilson43e28f02013-01-08 10:53:09 +00002301 }
2302
Chris Wilsona5570172012-09-04 21:02:54 +01002303 BUG_ON(obj->pages_pin_count);
2304
Chris Wilson37e680a2012-06-07 15:38:42 +01002305 ret = ops->get_pages(obj);
2306 if (ret)
2307 return ret;
2308
Ben Widawsky35c20a62013-05-31 11:28:48 -07002309 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilsonee286372015-04-07 16:20:25 +01002310
2311 obj->get_page.sg = obj->pages->sgl;
2312 obj->get_page.last = 0;
2313
Chris Wilson37e680a2012-06-07 15:38:42 +01002314 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002315}
2316
Dave Gordondd6034c2016-05-20 11:54:04 +01002317/* The 'mapping' part of i915_gem_object_pin_map() below */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002318static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2319 enum i915_map_type type)
Dave Gordondd6034c2016-05-20 11:54:04 +01002320{
2321 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2322 struct sg_table *sgt = obj->pages;
Dave Gordon85d12252016-05-20 11:54:06 +01002323 struct sgt_iter sgt_iter;
2324 struct page *page;
Dave Gordonb338fa42016-05-20 11:54:05 +01002325 struct page *stack_pages[32];
2326 struct page **pages = stack_pages;
Dave Gordondd6034c2016-05-20 11:54:04 +01002327 unsigned long i = 0;
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002328 pgprot_t pgprot;
Dave Gordondd6034c2016-05-20 11:54:04 +01002329 void *addr;
2330
2331 /* A single page can always be kmapped */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002332 if (n_pages == 1 && type == I915_MAP_WB)
Dave Gordondd6034c2016-05-20 11:54:04 +01002333 return kmap(sg_page(sgt->sgl));
2334
Dave Gordonb338fa42016-05-20 11:54:05 +01002335 if (n_pages > ARRAY_SIZE(stack_pages)) {
2336 /* Too big for stack -- allocate temporary array instead */
2337 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2338 if (!pages)
2339 return NULL;
2340 }
Dave Gordondd6034c2016-05-20 11:54:04 +01002341
Dave Gordon85d12252016-05-20 11:54:06 +01002342 for_each_sgt_page(page, sgt_iter, sgt)
2343 pages[i++] = page;
Dave Gordondd6034c2016-05-20 11:54:04 +01002344
2345 /* Check that we have the expected number of pages */
2346 GEM_BUG_ON(i != n_pages);
2347
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002348 switch (type) {
2349 case I915_MAP_WB:
2350 pgprot = PAGE_KERNEL;
2351 break;
2352 case I915_MAP_WC:
2353 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2354 break;
2355 }
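	/* Map all of the object's pages into one contiguous kernel virtual
	 * range with the requested caching attributes.
	 */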
2356 addr = vmap(pages, n_pages, 0, pgprot);
Dave Gordondd6034c2016-05-20 11:54:04 +01002357
Dave Gordonb338fa42016-05-20 11:54:05 +01002358 if (pages != stack_pages)
2359 drm_free_large(pages);
Dave Gordondd6034c2016-05-20 11:54:04 +01002360
2361 return addr;
2362}
2363
2364/* get, pin, and map the pages of the object into kernel space */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002365void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2366 enum i915_map_type type)
Chris Wilson0a798eb2016-04-08 12:11:11 +01002367{
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002368 enum i915_map_type has_type;
2369 bool pinned;
2370 void *ptr;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002371 int ret;
2372
2373 lockdep_assert_held(&obj->base.dev->struct_mutex);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002374 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
Chris Wilson0a798eb2016-04-08 12:11:11 +01002375
2376 ret = i915_gem_object_get_pages(obj);
2377 if (ret)
2378 return ERR_PTR(ret);
2379
2380 i915_gem_object_pin_pages(obj);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002381 pinned = obj->pages_pin_count > 1;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002382
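	/* An existing mapping of a different type can only be replaced
	 * if no one else holds a pin on the pages.
	 */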
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002383 ptr = ptr_unpack_bits(obj->mapping, has_type);
2384 if (ptr && has_type != type) {
2385 if (pinned) {
2386 ret = -EBUSY;
2387 goto err;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002388 }
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002389
2390 if (is_vmalloc_addr(ptr))
2391 vunmap(ptr);
2392 else
2393 kunmap(kmap_to_page(ptr));
2394
2395 ptr = obj->mapping = NULL;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002396 }
2397
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002398 if (!ptr) {
2399 ptr = i915_gem_object_map(obj, type);
2400 if (!ptr) {
2401 ret = -ENOMEM;
2402 goto err;
2403 }
2404
2405 obj->mapping = ptr_pack_bits(ptr, type);
2406 }
2407
2408 return ptr;
2409
2410err:
2411 i915_gem_object_unpin_pages(obj);
2412 return ERR_PTR(ret);
Chris Wilson0a798eb2016-04-08 12:11:11 +01002413}
2414
Chris Wilsoncaea7472010-11-12 13:53:37 +00002415static void
Chris Wilsonfa545cb2016-08-04 07:52:35 +01002416i915_gem_object_retire__write(struct i915_gem_active *active,
2417 struct drm_i915_gem_request *request)
Chris Wilsonb4716182015-04-27 13:41:17 +01002418{
Chris Wilsonfa545cb2016-08-04 07:52:35 +01002419 struct drm_i915_gem_object *obj =
2420 container_of(active, struct drm_i915_gem_object, last_write);
Chris Wilsonb4716182015-04-27 13:41:17 +01002421
Rodrigo Vivide152b62015-07-07 16:28:51 -07002422 intel_fb_obj_flush(obj, true, ORIGIN_CS);
Chris Wilsonb4716182015-04-27 13:41:17 +01002423}
2424
2425static void
Chris Wilsonfa545cb2016-08-04 07:52:35 +01002426i915_gem_object_retire__read(struct i915_gem_active *active,
2427 struct drm_i915_gem_request *request)
Chris Wilsoncaea7472010-11-12 13:53:37 +00002428{
Chris Wilsonfa545cb2016-08-04 07:52:35 +01002429 int idx = request->engine->id;
2430 struct drm_i915_gem_object *obj =
2431 container_of(active, struct drm_i915_gem_object, last_read[idx]);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002432
Chris Wilson573adb32016-08-04 16:32:39 +01002433 GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
Chris Wilsonb4716182015-04-27 13:41:17 +01002434
Chris Wilson573adb32016-08-04 16:32:39 +01002435 i915_gem_object_clear_active(obj, idx);
2436 if (i915_gem_object_is_active(obj))
Chris Wilsonb4716182015-04-27 13:41:17 +01002437 return;
Chris Wilson65ce3022012-07-20 12:41:02 +01002438
Chris Wilson6c246952015-07-27 10:26:26 +01002439 /* Bump our place on the bound list to keep it roughly in LRU order
2440 * so that we don't steal from recently used but inactive objects
2441 * (unless we are forced to ofc!)
2442 */
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002443 if (obj->bind_count)
2444 list_move_tail(&obj->global_list,
2445 &request->i915->mm.bound_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002446
Chris Wilsonf8c417c2016-07-20 13:31:53 +01002447 i915_gem_object_put(obj);
Chris Wilsonc8725f32014-03-17 12:21:55 +00002448}
2449
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002450static bool i915_context_is_banned(const struct i915_gem_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002451{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002452 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002453
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002454 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002455 return true;
2456
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002457 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
Chris Wilson676fa572014-12-24 08:13:39 -08002458 if (ctx->hang_stats.ban_period_seconds &&
2459 elapsed <= ctx->hang_stats.ban_period_seconds) {
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002460 DRM_DEBUG("context hanging too fast, banning!\n");
2461 return true;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002462 }
2463
2464 return false;
2465}
2466
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002467static void i915_set_reset_status(struct i915_gem_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002468 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002469{
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002470 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002471
2472 if (guilty) {
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002473 hs->banned = i915_context_is_banned(ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002474 hs->batch_active++;
2475 hs->guilty_ts = get_seconds();
2476 } else {
2477 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002478 }
2479}
2480
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002481struct drm_i915_gem_request *
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002482i915_gem_find_active_request(struct intel_engine_cs *engine)
Chris Wilson9375e442010-09-19 12:21:28 +01002483{
Chris Wilson4db080f2013-12-04 11:37:09 +00002484 struct drm_i915_gem_request *request;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002485
Chris Wilsonf69a02c2016-07-01 17:23:16 +01002486 /* We are called by the error capture and reset at a random
2487 * point in time. In particular, note that neither is crucially
2488 * ordered with an interrupt. After a hang, the GPU is dead and we
2489 * assume that no more writes can happen (we waited long enough for
2490 * all writes that were in transaction to be flushed) - adding an
2491 * extra delay for a recent interrupt is pointless. Hence, we do
2492 * not need an engine->irq_seqno_barrier() before the seqno reads.
2493 */
Chris Wilsonefdf7c02016-08-04 07:52:33 +01002494 list_for_each_entry(request, &engine->request_list, link) {
Chris Wilsonf69a02c2016-07-01 17:23:16 +01002495 if (i915_gem_request_completed(request))
Chris Wilson4db080f2013-12-04 11:37:09 +00002496 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002497
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002498 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002499 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002500
2501 return NULL;
2502}
2503
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002504static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002505{
2506 struct drm_i915_gem_request *request;
2507 bool ring_hung;
2508
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002509 request = i915_gem_find_active_request(engine);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002510 if (request == NULL)
2511 return;
2512
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002513 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002514
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002515 i915_set_reset_status(request->ctx, ring_hung);
Chris Wilsonefdf7c02016-08-04 07:52:33 +01002516 list_for_each_entry_continue(request, &engine->request_list, link)
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002517 i915_set_reset_status(request->ctx, false);
Chris Wilson4db080f2013-12-04 11:37:09 +00002518}
2519
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002520static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
Chris Wilson4db080f2013-12-04 11:37:09 +00002521{
Chris Wilsondcff85c2016-08-05 10:14:11 +01002522 struct drm_i915_gem_request *request;
Chris Wilson7e37f882016-08-02 22:50:21 +01002523 struct intel_ring *ring;
Chris Wilson608c1a52015-09-03 13:01:40 +01002524
Chris Wilsonc4b09302016-07-20 09:21:10 +01002525 /* Mark all pending requests as complete so that any concurrent
2526 * (lockless) lookup doesn't try and wait upon the request as we
2527 * reset it.
2528 */
Chris Wilson87b723a2016-08-09 08:37:02 +01002529 intel_engine_init_seqno(engine, engine->last_submitted_seqno);
Chris Wilsonc4b09302016-07-20 09:21:10 +01002530
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002531 /*
Oscar Mateodcb4c122014-11-13 10:28:10 +00002532 * Clear the execlists queue up before freeing the requests, as those
2533 * are the ones that keep the context and ringbuffer backing objects
2534 * pinned in place.
2535 */
Oscar Mateodcb4c122014-11-13 10:28:10 +00002536
Tomas Elf7de16912015-10-19 16:32:32 +01002537 if (i915.enable_execlists) {
Tvrtko Ursulin27af5ee2016-04-04 12:11:56 +01002538 /* Ensure irq handler finishes or is cancelled. */
2539 tasklet_kill(&engine->irq_tasklet);
Mika Kuoppala1197b4f2015-01-13 11:32:24 +02002540
Tvrtko Ursuline39d42f2016-04-28 09:56:58 +01002541 intel_execlists_cancel_requests(engine);
Oscar Mateodcb4c122014-11-13 10:28:10 +00002542 }
2543
2544 /*
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002545 * We must free the requests after all the corresponding objects have
2546 * been moved off active lists. Which is the same order as the normal
 2547	 * retire_requests function does. This is important if objects hold
2548 * implicit references on things like e.g. ppgtt address spaces through
2549 * the request.
2550 */
Chris Wilson87b723a2016-08-09 08:37:02 +01002551 request = i915_gem_active_raw(&engine->last_request,
2552 &engine->i915->drm.struct_mutex);
Chris Wilsondcff85c2016-08-05 10:14:11 +01002553 if (request)
Chris Wilson05235c52016-07-20 09:21:08 +01002554 i915_gem_request_retire_upto(request);
Chris Wilsondcff85c2016-08-05 10:14:11 +01002555 GEM_BUG_ON(intel_engine_is_active(engine));
Chris Wilson608c1a52015-09-03 13:01:40 +01002556
2557 /* Having flushed all requests from all queues, we know that all
2558 * ringbuffers must now be empty. However, since we do not reclaim
2559 * all space when retiring the request (to prevent HEADs colliding
2560 * with rapid ringbuffer wraparound) the amount of available space
2561 * upon reset is less than when we start. Do one more pass over
2562 * all the ringbuffers to reset last_retired_head.
2563 */
Chris Wilson7e37f882016-08-02 22:50:21 +01002564 list_for_each_entry(ring, &engine->buffers, link) {
2565 ring->last_retired_head = ring->tail;
2566 intel_ring_update_space(ring);
Chris Wilson608c1a52015-09-03 13:01:40 +01002567 }
Chris Wilson2ed53a92016-04-07 07:29:11 +01002568
Chris Wilsonb913b332016-07-13 09:10:31 +01002569 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
Eric Anholt673a3942008-07-30 12:06:12 -07002570}
2571
Chris Wilson069efc12010-09-30 16:53:18 +01002572void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002573{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002574 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002575 struct intel_engine_cs *engine;
Eric Anholt673a3942008-07-30 12:06:12 -07002576
Chris Wilson4db080f2013-12-04 11:37:09 +00002577 /*
2578 * Before we free the objects from the requests, we need to inspect
2579 * them for finding the guilty party. As the requests only borrow
2580 * their reference to the objects, the inspection must be done first.
2581 */
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002582 for_each_engine(engine, dev_priv)
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002583 i915_gem_reset_engine_status(engine);
Chris Wilson4db080f2013-12-04 11:37:09 +00002584
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002585 for_each_engine(engine, dev_priv)
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002586 i915_gem_reset_engine_cleanup(engine);
Chris Wilsonb913b332016-07-13 09:10:31 +01002587 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
Chris Wilsondfaae392010-09-22 10:31:52 +01002588
Ben Widawskyacce9ff2013-12-06 14:11:03 -08002589 i915_gem_context_reset(dev);
2590
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002591 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002592}
2593
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002594static void
Eric Anholt673a3942008-07-30 12:06:12 -07002595i915_gem_retire_work_handler(struct work_struct *work)
2596{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002597 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01002598 container_of(work, typeof(*dev_priv), gt.retire_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01002599 struct drm_device *dev = &dev_priv->drm;
Eric Anholt673a3942008-07-30 12:06:12 -07002600
Chris Wilson891b48c2010-09-29 12:26:37 +01002601 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002602 if (mutex_trylock(&dev->struct_mutex)) {
Chris Wilson67d97da2016-07-04 08:08:31 +01002603 i915_gem_retire_requests(dev_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002604 mutex_unlock(&dev->struct_mutex);
2605 }
Chris Wilson67d97da2016-07-04 08:08:31 +01002606
2607 /* Keep the retire handler running until we are finally idle.
2608 * We do not need to do this test under locking as in the worst-case
2609 * we queue the retire worker once too often.
2610 */
Chris Wilsonc9615612016-07-09 10:12:06 +01002611 if (READ_ONCE(dev_priv->gt.awake)) {
2612 i915_queue_hangcheck(dev_priv);
Chris Wilson67d97da2016-07-04 08:08:31 +01002613 queue_delayed_work(dev_priv->wq,
2614 &dev_priv->gt.retire_work,
Chris Wilsonbcb45082012-10-05 17:02:57 +01002615 round_jiffies_up_relative(HZ));
Chris Wilsonc9615612016-07-09 10:12:06 +01002616 }
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002617}
Chris Wilson891b48c2010-09-29 12:26:37 +01002618
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002619static void
2620i915_gem_idle_work_handler(struct work_struct *work)
2621{
2622 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01002623 container_of(work, typeof(*dev_priv), gt.idle_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01002624 struct drm_device *dev = &dev_priv->drm;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002625 struct intel_engine_cs *engine;
Chris Wilson67d97da2016-07-04 08:08:31 +01002626 bool rearm_hangcheck;
2627
2628 if (!READ_ONCE(dev_priv->gt.awake))
2629 return;
2630
2631 if (READ_ONCE(dev_priv->gt.active_engines))
2632 return;
2633
2634 rearm_hangcheck =
2635 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2636
2637 if (!mutex_trylock(&dev->struct_mutex)) {
2638 /* Currently busy, come back later */
2639 mod_delayed_work(dev_priv->wq,
2640 &dev_priv->gt.idle_work,
2641 msecs_to_jiffies(50));
2642 goto out_rearm;
2643 }
2644
2645 if (dev_priv->gt.active_engines)
2646 goto out_unlock;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002647
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002648 for_each_engine(engine, dev_priv)
Chris Wilson67d97da2016-07-04 08:08:31 +01002649 i915_gem_batch_pool_fini(&engine->batch_pool);
Zou Nan hai852835f2010-05-21 09:08:56 +08002650
Chris Wilson67d97da2016-07-04 08:08:31 +01002651 GEM_BUG_ON(!dev_priv->gt.awake);
2652 dev_priv->gt.awake = false;
2653 rearm_hangcheck = false;
Daniel Vetter30ecad72015-12-09 09:29:36 +01002654
Chris Wilson67d97da2016-07-04 08:08:31 +01002655 if (INTEL_GEN(dev_priv) >= 6)
2656 gen6_rps_idle(dev_priv);
2657 intel_runtime_pm_put(dev_priv);
2658out_unlock:
2659 mutex_unlock(&dev->struct_mutex);
Chris Wilson35c94182015-04-07 16:20:37 +01002660
Chris Wilson67d97da2016-07-04 08:08:31 +01002661out_rearm:
2662 if (rearm_hangcheck) {
2663 GEM_BUG_ON(!dev_priv->gt.awake);
2664 i915_queue_hangcheck(dev_priv);
Chris Wilson35c94182015-04-07 16:20:37 +01002665 }
Eric Anholt673a3942008-07-30 12:06:12 -07002666}
2667
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002668void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2669{
2670 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2671 struct drm_i915_file_private *fpriv = file->driver_priv;
2672 struct i915_vma *vma, *vn;
2673
2674 mutex_lock(&obj->base.dev->struct_mutex);
2675 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2676 if (vma->vm->file == fpriv)
2677 i915_vma_close(vma);
2678 mutex_unlock(&obj->base.dev->struct_mutex);
2679}
2680
Ben Widawsky5816d642012-04-11 11:18:19 -07002681/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002682 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002683 * @dev: drm device pointer
2684 * @data: ioctl data blob
2685 * @file: drm file pointer
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002686 *
2687 * Returns 0 if successful, else an error is returned with the remaining time in
2688 * the timeout parameter.
2689 * -ETIME: object is still busy after timeout
2690 * -ERESTARTSYS: signal interrupted the wait
 2691 * -ENOENT: object doesn't exist
2692 * Also possible, but rare:
2693 * -EAGAIN: GPU wedged
2694 * -ENOMEM: damn
2695 * -ENODEV: Internal IRQ fail
2696 * -E?: The add request failed
2697 *
2698 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2699 * non-zero timeout parameter the wait ioctl will wait for the given number of
2700 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2701 * without holding struct_mutex the object may become re-busied before this
 2702 * function completes. A similar but shorter race condition exists in the busy
2703 * ioctl
2704 */
2705int
2706i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2707{
2708 struct drm_i915_gem_wait *args = data;
Chris Wilson033d5492016-08-05 10:14:17 +01002709 struct intel_rps_client *rps = to_rps_client(file);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002710 struct drm_i915_gem_object *obj;
Chris Wilson033d5492016-08-05 10:14:17 +01002711 unsigned long active;
2712 int idx, ret = 0;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002713
Daniel Vetter11b5d512014-09-29 15:31:26 +02002714 if (args->flags != 0)
2715 return -EINVAL;
2716
Chris Wilson03ac0642016-07-20 13:31:51 +01002717 obj = i915_gem_object_lookup(file, args->bo_handle);
Chris Wilson033d5492016-08-05 10:14:17 +01002718 if (!obj)
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002719 return -ENOENT;
Chris Wilson033d5492016-08-05 10:14:17 +01002720
2721 active = __I915_BO_ACTIVE(obj);
2722 for_each_active(active, idx) {
2723 s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
2724 ret = i915_gem_active_wait_unlocked(&obj->last_read[idx], true,
2725 timeout, rps);
2726 if (ret)
2727 break;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002728 }
2729
Chris Wilson033d5492016-08-05 10:14:17 +01002730 i915_gem_object_put_unlocked(obj);
John Harrisonff865882014-11-24 18:49:28 +00002731 return ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002732}
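/*
 * Userspace usage sketch for the wait ioctl documented above. This is a
 * hedged example for a separate userspace program, not driver code; it
 * assumes a DRM fd and GEM handle obtained elsewhere, and that the uapi
 * header is reachable as <drm/i915_drm.h> (the install path may differ).
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Wait up to @timeout_ns for @handle to go idle: 0 on success, -ETIME if
 * the object is still busy when the timeout expires, other -errno on error.
 * A timeout of 0 behaves like the busy ioctl, as described above.
 */
static int gem_wait(int drm_fd, uint32_t handle, int64_t timeout_ns)
{
	struct drm_i915_gem_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	wait.timeout_ns = timeout_ns;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
		return -errno;

	return 0;	/* wait.timeout_ns is updated with the remaining time */
}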
2733
Chris Wilsonb4716182015-04-27 13:41:17 +01002734static int
Chris Wilsonfa545cb2016-08-04 07:52:35 +01002735__i915_gem_object_sync(struct drm_i915_gem_request *to,
Chris Wilson8e637172016-08-02 22:50:26 +01002736 struct drm_i915_gem_request *from)
Chris Wilsonb4716182015-04-27 13:41:17 +01002737{
Chris Wilsonb4716182015-04-27 13:41:17 +01002738 int ret;
2739
Chris Wilson8e637172016-08-02 22:50:26 +01002740 if (to->engine == from->engine)
Chris Wilsonb4716182015-04-27 13:41:17 +01002741 return 0;
2742
Chris Wilson39df9192016-07-20 13:31:57 +01002743 if (!i915.semaphores) {
Chris Wilson776f3232016-08-04 07:52:40 +01002744 ret = i915_wait_request(from,
2745 from->i915->mm.interruptible,
2746 NULL,
2747 NO_WAITBOOST);
Chris Wilsonb4716182015-04-27 13:41:17 +01002748 if (ret)
2749 return ret;
Chris Wilsonb4716182015-04-27 13:41:17 +01002750 } else {
Chris Wilson8e637172016-08-02 22:50:26 +01002751 int idx = intel_engine_sync_index(from->engine, to->engine);
Chris Wilsonddf07be2016-08-02 22:50:39 +01002752 if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
Chris Wilsonb4716182015-04-27 13:41:17 +01002753 return 0;
2754
Chris Wilson8e637172016-08-02 22:50:26 +01002755 trace_i915_gem_ring_sync_to(to, from);
Chris Wilsonddf07be2016-08-02 22:50:39 +01002756 ret = to->engine->semaphore.sync_to(to, from);
Chris Wilsonb4716182015-04-27 13:41:17 +01002757 if (ret)
2758 return ret;
2759
Chris Wilsonddf07be2016-08-02 22:50:39 +01002760 from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
Chris Wilsonb4716182015-04-27 13:41:17 +01002761 }
2762
2763 return 0;
2764}
2765
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002766/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002767 * i915_gem_object_sync - sync an object to a ring.
2768 *
2769 * @obj: object which may be in use on another ring.
Chris Wilson8e637172016-08-02 22:50:26 +01002770 * @to: request we are wishing to use
Ben Widawsky5816d642012-04-11 11:18:19 -07002771 *
2772 * This code is meant to abstract object synchronization with the GPU.
Chris Wilson8e637172016-08-02 22:50:26 +01002773 * Conceptually we serialise writes between engines inside the GPU.
2774 * We only allow one engine to write into a buffer at any time, but
2775 * multiple readers. To ensure each has a coherent view of memory, we must:
Chris Wilsonb4716182015-04-27 13:41:17 +01002776 *
2777 * - If there is an outstanding write request to the object, the new
2778 * request must wait for it to complete (either CPU or in hw, requests
2779 * on the same ring will be naturally ordered).
2780 *
2781 * - If we are a write request (pending_write_domain is set), the new
2782 * request must wait for outstanding read requests to complete.
Ben Widawsky5816d642012-04-11 11:18:19 -07002783 *
2784 * Returns 0 if successful, else propagates up the lower layer error.
2785 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002786int
2787i915_gem_object_sync(struct drm_i915_gem_object *obj,
Chris Wilson8e637172016-08-02 22:50:26 +01002788 struct drm_i915_gem_request *to)
Ben Widawsky2911a352012-04-05 14:47:36 -07002789{
Chris Wilson8cac6f62016-08-04 07:52:32 +01002790 struct i915_gem_active *active;
2791 unsigned long active_mask;
2792 int idx;
Ben Widawsky2911a352012-04-05 14:47:36 -07002793
Chris Wilson8cac6f62016-08-04 07:52:32 +01002794 lockdep_assert_held(&obj->base.dev->struct_mutex);
2795
Chris Wilson573adb32016-08-04 16:32:39 +01002796 active_mask = i915_gem_object_get_active(obj);
Chris Wilson8cac6f62016-08-04 07:52:32 +01002797 if (!active_mask)
Ben Widawsky2911a352012-04-05 14:47:36 -07002798 return 0;
2799
Chris Wilson8cac6f62016-08-04 07:52:32 +01002800 if (obj->base.pending_write_domain) {
2801 active = obj->last_read;
Chris Wilsonb4716182015-04-27 13:41:17 +01002802 } else {
Chris Wilson8cac6f62016-08-04 07:52:32 +01002803 active_mask = 1;
2804 active = &obj->last_write;
Chris Wilsonb4716182015-04-27 13:41:17 +01002805 }
Chris Wilson8cac6f62016-08-04 07:52:32 +01002806
2807 for_each_active(active_mask, idx) {
2808 struct drm_i915_gem_request *request;
2809 int ret;
2810
2811 request = i915_gem_active_peek(&active[idx],
2812 &obj->base.dev->struct_mutex);
2813 if (!request)
2814 continue;
2815
Chris Wilsonfa545cb2016-08-04 07:52:35 +01002816 ret = __i915_gem_object_sync(to, request);
Chris Wilsonb4716182015-04-27 13:41:17 +01002817 if (ret)
2818 return ret;
2819 }
Ben Widawsky2911a352012-04-05 14:47:36 -07002820
Chris Wilsonb4716182015-04-27 13:41:17 +01002821 return 0;
Ben Widawsky2911a352012-04-05 14:47:36 -07002822}
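/*
 * Illustrative aside (not driver code): the rules in the comment above boil
 * down to "a new writer must wait for every outstanding reader, while a new
 * reader only needs to wait for the last writer". A toy model of that
 * decision, with made-up names, assuming one bit per engine:
 */
#include <stdbool.h>

struct toy_obj_state {
	unsigned long read_mask;	/* engines with outstanding reads */
	int last_writer;		/* engine of the last write, or -1 */
};

/* Bitmask of engines the new request must synchronise against. */
static unsigned long toy_sync_mask(const struct toy_obj_state *obj,
				   bool will_write)
{
	if (will_write)
		return obj->read_mask;		/* wait for all readers */
	if (obj->last_writer < 0)
		return 0;			/* nothing to wait for */
	return 1UL << obj->last_writer;		/* wait for the writer only */
}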
2823
Chris Wilson8ef85612016-04-28 09:56:39 +01002824static void __i915_vma_iounmap(struct i915_vma *vma)
2825{
Chris Wilson20dfbde2016-08-04 16:32:30 +01002826 GEM_BUG_ON(i915_vma_is_pinned(vma));
Chris Wilson8ef85612016-04-28 09:56:39 +01002827
2828 if (vma->iomap == NULL)
2829 return;
2830
2831 io_mapping_unmap(vma->iomap);
2832 vma->iomap = NULL;
2833}
2834
Chris Wilsondf0e9a22016-08-04 07:52:47 +01002835int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002836{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002837 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002838 unsigned long active;
Chris Wilson43e28f02013-01-08 10:53:09 +00002839 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002840
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002841 /* First wait upon any activity as retiring the request may
2842 * have side-effects such as unpinning or even unbinding this vma.
2843 */
2844 active = i915_vma_get_active(vma);
Chris Wilsondf0e9a22016-08-04 07:52:47 +01002845 if (active) {
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002846 int idx;
2847
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002848 /* When a closed VMA is retired, it is unbound - eek.
2849 * In order to prevent it from being recursively closed,
2850 * take a pin on the vma so that the second unbind is
2851 * aborted.
2852 */
Chris Wilson20dfbde2016-08-04 16:32:30 +01002853 __i915_vma_pin(vma);
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002854
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002855 for_each_active(active, idx) {
2856 ret = i915_gem_active_retire(&vma->last_read[idx],
2857 &vma->vm->dev->struct_mutex);
2858 if (ret)
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002859 break;
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002860 }
2861
Chris Wilson20dfbde2016-08-04 16:32:30 +01002862 __i915_vma_unpin(vma);
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002863 if (ret)
2864 return ret;
2865
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002866 GEM_BUG_ON(i915_vma_is_active(vma));
2867 }
2868
Chris Wilson20dfbde2016-08-04 16:32:30 +01002869 if (i915_vma_is_pinned(vma))
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002870 return -EBUSY;
2871
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002872 if (!drm_mm_node_allocated(&vma->node))
2873 goto destroy;
Ben Widawsky433544b2013-08-13 18:09:06 -07002874
Chris Wilson15717de2016-08-04 07:52:26 +01002875 GEM_BUG_ON(obj->bind_count == 0);
2876 GEM_BUG_ON(!obj->pages);
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002877
Chris Wilson05a20d02016-08-18 17:16:55 +01002878 if (i915_vma_is_map_and_fenceable(vma)) {
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002879 /* release the fence reg _after_ flushing */
Chris Wilson49ef5292016-08-18 17:17:00 +01002880 ret = i915_vma_put_fence(vma);
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002881 if (ret)
2882 return ret;
Chris Wilson8ef85612016-04-28 09:56:39 +01002883
Chris Wilsoncd3127d2016-08-18 17:17:09 +01002884 /* Force a pagefault for domain tracking on next user access */
2885 i915_gem_release_mmap(obj);
2886
Chris Wilson8ef85612016-04-28 09:56:39 +01002887 __i915_vma_iounmap(vma);
Chris Wilson05a20d02016-08-18 17:16:55 +01002888 vma->flags &= ~I915_VMA_CAN_FENCE;
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002889 }
Daniel Vetter96b47b62009-12-15 17:50:00 +01002890
Chris Wilson50e046b2016-08-04 07:52:46 +01002891 if (likely(!vma->vm->closed)) {
2892 trace_i915_vma_unbind(vma);
2893 vma->vm->unbind_vma(vma);
2894 }
Chris Wilson3272db52016-08-04 16:32:32 +01002895 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
Ben Widawsky6f65e292013-12-06 14:10:56 -08002896
Chris Wilson50e046b2016-08-04 07:52:46 +01002897 drm_mm_remove_node(&vma->node);
2898 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2899
Chris Wilson05a20d02016-08-18 17:16:55 +01002900 if (vma->pages != obj->pages) {
2901 GEM_BUG_ON(!vma->pages);
2902 sg_free_table(vma->pages);
2903 kfree(vma->pages);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00002904 }
Chris Wilson247177d2016-08-15 10:48:47 +01002905 vma->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07002906
Ben Widawsky2f633152013-07-17 12:19:03 -07002907 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002908 * no more VMAs exist. */
Chris Wilson15717de2016-08-04 07:52:26 +01002909 if (--obj->bind_count == 0)
2910 list_move_tail(&obj->global_list,
2911 &to_i915(obj->base.dev)->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002912
Chris Wilson70903c32013-12-04 09:59:09 +00002913 /* And finally now the object is completely decoupled from this vma,
2914 * we can drop its hold on the backing storage and allow it to be
2915 * reaped by the shrinker.
2916 */
2917 i915_gem_object_unpin_pages(obj);
2918
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002919destroy:
Chris Wilson3272db52016-08-04 16:32:32 +01002920 if (unlikely(i915_vma_is_closed(vma)))
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002921 i915_vma_destroy(vma);
2922
Chris Wilson88241782011-01-07 17:09:48 +00002923 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002924}
2925
Chris Wilsondcff85c2016-08-05 10:14:11 +01002926int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
2927 bool interruptible)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002928{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002929 struct intel_engine_cs *engine;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002930 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002931
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002932 for_each_engine(engine, dev_priv) {
Chris Wilson62e63002016-06-24 14:55:52 +01002933 if (engine->last_context == NULL)
2934 continue;
2935
Chris Wilsondcff85c2016-08-05 10:14:11 +01002936 ret = intel_engine_idle(engine, interruptible);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002937 if (ret)
2938 return ret;
2939 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002940
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002941 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002942}
2943
Chris Wilson4144f9b2014-09-11 08:43:48 +01002944static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
Chris Wilson42d6ab42012-07-26 11:49:32 +01002945 unsigned long cache_level)
2946{
Chris Wilson4144f9b2014-09-11 08:43:48 +01002947 struct drm_mm_node *gtt_space = &vma->node;
Chris Wilson42d6ab42012-07-26 11:49:32 +01002948 struct drm_mm_node *other;
2949
Chris Wilson4144f9b2014-09-11 08:43:48 +01002950 /*
2951 * On some machines we have to be careful when putting differing types
2952 * of snoopable memory together to avoid the prefetcher crossing memory
2953 * domains and dying. During vm initialisation, we decide whether or not
2954 * these constraints apply and set the drm_mm.color_adjust
2955 * appropriately.
Chris Wilson42d6ab42012-07-26 11:49:32 +01002956 */
Chris Wilson4144f9b2014-09-11 08:43:48 +01002957 if (vma->vm->mm.color_adjust == NULL)
Chris Wilson42d6ab42012-07-26 11:49:32 +01002958 return true;
2959
Ben Widawskyc6cfb322013-07-05 14:41:06 -07002960 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01002961 return true;
2962
2963 if (list_empty(&gtt_space->node_list))
2964 return true;
2965
2966 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2967 if (other->allocated && !other->hole_follows && other->color != cache_level)
2968 return false;
2969
2970 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2971 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2972 return false;
2973
2974 return true;
2975}
2976
Jesse Barnesde151cf2008-11-12 10:03:55 -08002977/**
Chris Wilson59bfa122016-08-04 16:32:31 +01002978 * i915_vma_insert - finds a slot for the vma in its address space
2979 * @vma: the vma
Chris Wilson91b2db62016-08-04 16:32:23 +01002980 * @size: requested size in bytes (can be larger than the VMA)
Chris Wilson59bfa122016-08-04 16:32:31 +01002981 * @alignment: required alignment
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002982 * @flags: mask of PIN_* flags to use
Chris Wilson59bfa122016-08-04 16:32:31 +01002983 *
2984 * First we try to allocate some free space that meets the requirements for
 2985 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 2986 * preferably the oldest idle entry, to make room for the new VMA.
2987 *
2988 * Returns:
2989 * 0 on success, negative error code otherwise.
Eric Anholt673a3942008-07-30 12:06:12 -07002990 */
Chris Wilson59bfa122016-08-04 16:32:31 +01002991static int
2992i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
Eric Anholt673a3942008-07-30 12:06:12 -07002993{
Chris Wilson59bfa122016-08-04 16:32:31 +01002994 struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
2995 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilsonde180032016-08-04 16:32:29 +01002996 u64 start, end;
Chris Wilson07f73f62009-09-14 16:50:30 +01002997 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002998
Chris Wilson3272db52016-08-04 16:32:32 +01002999 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
Chris Wilson59bfa122016-08-04 16:32:31 +01003000 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003001
Chris Wilsonde180032016-08-04 16:32:29 +01003002 size = max(size, vma->size);
3003 if (flags & PIN_MAPPABLE)
Chris Wilson3e510a82016-08-05 10:14:23 +01003004 size = i915_gem_get_ggtt_size(dev_priv, size,
3005 i915_gem_object_get_tiling(obj));
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003006
Chris Wilsond8923dc2016-08-18 17:17:07 +01003007 alignment = max(max(alignment, vma->display_alignment),
3008 i915_gem_get_ggtt_alignment(dev_priv, size,
3009 i915_gem_object_get_tiling(obj),
3010 flags & PIN_MAPPABLE));
Chris Wilsona00b10c2010-09-24 21:15:47 +01003011
Michel Thierry101b5062015-10-01 13:33:57 +01003012 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
Chris Wilsonde180032016-08-04 16:32:29 +01003013
3014 end = vma->vm->total;
Michel Thierry101b5062015-10-01 13:33:57 +01003015 if (flags & PIN_MAPPABLE)
Chris Wilson91b2db62016-08-04 16:32:23 +01003016 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
Michel Thierry101b5062015-10-01 13:33:57 +01003017 if (flags & PIN_ZONE_4G)
Michel Thierry48ea1e32016-01-11 11:39:27 +00003018 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
Michel Thierry101b5062015-10-01 13:33:57 +01003019
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003020 /* If binding the object/GGTT view requires more space than the entire
3021 * aperture has, reject it early before evicting everything in a vain
3022 * attempt to find space.
Chris Wilson654fc602010-05-27 13:18:21 +01003023 */
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003024 if (size > end) {
Chris Wilsonde180032016-08-04 16:32:29 +01003025 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
Chris Wilson91b2db62016-08-04 16:32:23 +01003026 size, obj->base.size,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003027 flags & PIN_MAPPABLE ? "mappable" : "total",
Chris Wilsond23db882014-05-23 08:48:08 +02003028 end);
Chris Wilson59bfa122016-08-04 16:32:31 +01003029 return -E2BIG;
Chris Wilson654fc602010-05-27 13:18:21 +01003030 }
3031
Chris Wilson37e680a2012-06-07 15:38:42 +01003032 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003033 if (ret)
Chris Wilson59bfa122016-08-04 16:32:31 +01003034 return ret;
Chris Wilson6c085a72012-08-20 11:40:46 +02003035
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003036 i915_gem_object_pin_pages(obj);
3037
Chris Wilson506a8e82015-12-08 11:55:07 +00003038 if (flags & PIN_OFFSET_FIXED) {
Chris Wilson59bfa122016-08-04 16:32:31 +01003039 u64 offset = flags & PIN_OFFSET_MASK;
Chris Wilsonde180032016-08-04 16:32:29 +01003040 if (offset & (alignment - 1) || offset > end - size) {
Chris Wilson506a8e82015-12-08 11:55:07 +00003041 ret = -EINVAL;
Chris Wilsonde180032016-08-04 16:32:29 +01003042 goto err_unpin;
Chris Wilson506a8e82015-12-08 11:55:07 +00003043 }
Chris Wilsonde180032016-08-04 16:32:29 +01003044
Chris Wilson506a8e82015-12-08 11:55:07 +00003045 vma->node.start = offset;
3046 vma->node.size = size;
3047 vma->node.color = obj->cache_level;
Chris Wilsonde180032016-08-04 16:32:29 +01003048 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
Chris Wilson506a8e82015-12-08 11:55:07 +00003049 if (ret) {
3050 ret = i915_gem_evict_for_vma(vma);
3051 if (ret == 0)
Chris Wilsonde180032016-08-04 16:32:29 +01003052 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3053 if (ret)
3054 goto err_unpin;
Chris Wilson506a8e82015-12-08 11:55:07 +00003055 }
Michel Thierry101b5062015-10-01 13:33:57 +01003056 } else {
Chris Wilsonde180032016-08-04 16:32:29 +01003057 u32 search_flag, alloc_flag;
3058
Chris Wilson506a8e82015-12-08 11:55:07 +00003059 if (flags & PIN_HIGH) {
3060 search_flag = DRM_MM_SEARCH_BELOW;
3061 alloc_flag = DRM_MM_CREATE_TOP;
3062 } else {
3063 search_flag = DRM_MM_SEARCH_DEFAULT;
3064 alloc_flag = DRM_MM_CREATE_DEFAULT;
3065 }
Michel Thierry101b5062015-10-01 13:33:57 +01003066
Chris Wilson954c4692016-08-04 16:32:26 +01003067 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3068 * so we know that we always have a minimum alignment of 4096.
3069 * The drm_mm range manager is optimised to return results
3070 * with zero alignment, so where possible use the optimal
3071 * path.
3072 */
3073 if (alignment <= 4096)
3074 alignment = 0;
3075
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003076search_free:
Chris Wilsonde180032016-08-04 16:32:29 +01003077 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
3078 &vma->node,
Chris Wilson506a8e82015-12-08 11:55:07 +00003079 size, alignment,
3080 obj->cache_level,
3081 start, end,
3082 search_flag,
3083 alloc_flag);
3084 if (ret) {
Chris Wilsonde180032016-08-04 16:32:29 +01003085 ret = i915_gem_evict_something(vma->vm, size, alignment,
Chris Wilson506a8e82015-12-08 11:55:07 +00003086 obj->cache_level,
3087 start, end,
3088 flags);
3089 if (ret == 0)
3090 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003091
Chris Wilsonde180032016-08-04 16:32:29 +01003092 goto err_unpin;
Chris Wilson506a8e82015-12-08 11:55:07 +00003093 }
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003094 }
Chris Wilson37508582016-08-04 16:32:24 +01003095 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
Eric Anholt673a3942008-07-30 12:06:12 -07003096
Ben Widawsky35c20a62013-05-31 11:28:48 -07003097 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Chris Wilsonde180032016-08-04 16:32:29 +01003098 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
Chris Wilson15717de2016-08-04 07:52:26 +01003099 obj->bind_count++;
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003100
Chris Wilson59bfa122016-08-04 16:32:31 +01003101 return 0;
Ben Widawsky2f633152013-07-17 12:19:03 -07003102
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003103err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003104 i915_gem_object_unpin_pages(obj);
Chris Wilson59bfa122016-08-04 16:32:31 +01003105 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003106}
3107
Chris Wilson000433b2013-08-08 14:41:09 +01003108bool
Chris Wilson2c225692013-08-09 12:26:45 +01003109i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3110 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003111{
Eric Anholt673a3942008-07-30 12:06:12 -07003112 /* If we don't have a page list set up, then we're not pinned
3113 * to GPU, and we can ignore the cache flush because it'll happen
3114 * again at bind time.
3115 */
Chris Wilson05394f32010-11-08 19:18:58 +00003116 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003117 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003118
Imre Deak769ce462013-02-13 21:56:05 +02003119 /*
3120 * Stolen memory is always coherent with the GPU as it is explicitly
3121 * marked as wc by the system, or the system is cache-coherent.
3122 */
Chris Wilson6a2c4232014-11-04 04:51:40 -08003123 if (obj->stolen || obj->phys_handle)
Chris Wilson000433b2013-08-08 14:41:09 +01003124 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003125
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003126 /* If the GPU is snooping the contents of the CPU cache,
3127 * we do not need to manually clear the CPU cache lines. However,
3128 * the caches are only snooped when the render cache is
3129 * flushed/invalidated. As we always have to emit invalidations
3130 * and flushes when moving into and out of the RENDER domain, correct
3131 * snooping behaviour occurs naturally as the result of our domain
3132 * tracking.
3133 */
Chris Wilson0f719792015-01-13 13:32:52 +00003134 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3135 obj->cache_dirty = true;
Chris Wilson000433b2013-08-08 14:41:09 +01003136 return false;
Chris Wilson0f719792015-01-13 13:32:52 +00003137 }
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003138
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003139 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003140 drm_clflush_sg(obj->pages);
Chris Wilson0f719792015-01-13 13:32:52 +00003141 obj->cache_dirty = false;
Chris Wilson000433b2013-08-08 14:41:09 +01003142
3143 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003144}
3145
3146/** Flushes the GTT write domain for the object if it's dirty. */
3147static void
Chris Wilson05394f32010-11-08 19:18:58 +00003148i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003149{
Chris Wilson3b5724d2016-08-18 17:16:49 +01003150 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003151
Chris Wilson05394f32010-11-08 19:18:58 +00003152 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003153 return;
3154
Chris Wilson63256ec2011-01-04 18:42:07 +00003155 /* No actual flushing is required for the GTT write domain. Writes
Chris Wilson3b5724d2016-08-18 17:16:49 +01003156 * to it "immediately" go to main memory as far as we know, so there's
Eric Anholte47c68e2008-11-14 13:35:19 -08003157 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003158 *
3159 * However, we do have to enforce the order so that all writes through
3160 * the GTT land before any writes to the device, such as updates to
3161 * the GATT itself.
Chris Wilson3b5724d2016-08-18 17:16:49 +01003162 *
3163 * We also have to wait a bit for the writes to land from the GTT.
3164 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3165 * timing. This issue has only been observed when switching quickly
3166 * between GTT writes and CPU reads from inside the kernel on recent hw,
3167 * and it appears to only affect discrete GTT blocks (i.e. on LLC
3168 * system agents we cannot reproduce this behaviour).
Eric Anholte47c68e2008-11-14 13:35:19 -08003169 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003170 wmb();
Chris Wilson3b5724d2016-08-18 17:16:49 +01003171 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3172 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
Chris Wilson63256ec2011-01-04 18:42:07 +00003173
Chris Wilsond243ad82016-08-18 17:16:44 +01003174 intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
Daniel Vetterf99d7062014-06-19 16:01:59 +02003175
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003176 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003177 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003178 obj->base.read_domains,
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003179 I915_GEM_DOMAIN_GTT);
Eric Anholte47c68e2008-11-14 13:35:19 -08003180}
3181
3182/** Flushes the CPU write domain for the object if it's dirty. */
3183static void
Daniel Vettere62b59e2015-01-21 14:53:48 +01003184i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003185{
Chris Wilson05394f32010-11-08 19:18:58 +00003186 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003187 return;
3188
Daniel Vettere62b59e2015-01-21 14:53:48 +01003189 if (i915_gem_clflush_object(obj, obj->pin_display))
Chris Wilsonc0336662016-05-06 15:40:21 +01003190 i915_gem_chipset_flush(to_i915(obj->base.dev));
Chris Wilson000433b2013-08-08 14:41:09 +01003191
Rodrigo Vivide152b62015-07-07 16:28:51 -07003192 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Daniel Vetterf99d7062014-06-19 16:01:59 +02003193
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003194 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003195 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003196 obj->base.read_domains,
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003197 I915_GEM_DOMAIN_CPU);
Eric Anholte47c68e2008-11-14 13:35:19 -08003198}
3199
Chris Wilson383d5822016-08-18 17:17:08 +01003200static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
3201{
3202 struct i915_vma *vma;
3203
3204 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3205 if (!i915_vma_is_ggtt(vma))
3206 continue;
3207
3208 if (i915_vma_is_active(vma))
3209 continue;
3210
3211 if (!drm_mm_node_allocated(&vma->node))
3212 continue;
3213
3214 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3215 }
3216}
3217
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003218/**
3219 * Moves a single object to the GTT read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003220 * @obj: object to act on
3221 * @write: ask for write access or read only
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003222 *
3223 * This function returns when the move is complete, including waiting on
3224 * flushes to occur.
3225 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003226int
Chris Wilson20217462010-11-23 15:26:33 +00003227i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003228{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003229 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003230 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003231
Chris Wilson0201f1e2012-07-20 12:41:01 +01003232 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003233 if (ret)
3234 return ret;
3235
Chris Wilsonc13d87e2016-07-20 09:21:15 +01003236 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3237 return 0;
3238
Chris Wilson43566de2015-01-02 16:29:29 +05303239 /* Flush and acquire obj->pages so that we are coherent through
3240 * direct access in memory with previous cached writes through
3241 * shmemfs and that our cache domain tracking remains valid.
3242 * For example, if the obj->filp was moved to swap without us
3243 * being notified and releasing the pages, we would mistakenly
3244 * continue to assume that the obj remained out of the CPU cached
3245 * domain.
3246 */
3247 ret = i915_gem_object_get_pages(obj);
3248 if (ret)
3249 return ret;
3250
Daniel Vettere62b59e2015-01-21 14:53:48 +01003251 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003252
Chris Wilsond0a57782012-10-09 19:24:37 +01003253 /* Serialise direct access to this object with the barriers for
3254 * coherent writes from the GPU, by effectively invalidating the
3255 * GTT domain upon first access.
3256 */
3257 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3258 mb();
3259
Chris Wilson05394f32010-11-08 19:18:58 +00003260 old_write_domain = obj->base.write_domain;
3261 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003262
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003263 /* It should now be out of any other write domains, and we can update
3264 * the domain values for our changes.
3265 */
Chris Wilson05394f32010-11-08 19:18:58 +00003266 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3267 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003268 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003269 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3270 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3271 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003272 }
3273
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003274 trace_i915_gem_object_change_domain(obj,
3275 old_read_domains,
3276 old_write_domain);
3277
Chris Wilson8325a092012-04-24 15:52:35 +01003278 /* And bump the LRU for this access */
Chris Wilson383d5822016-08-18 17:17:08 +01003279 i915_gem_object_bump_inactive_ggtt(obj);
Chris Wilson8325a092012-04-24 15:52:35 +01003280
Eric Anholte47c68e2008-11-14 13:35:19 -08003281 return 0;
3282}
3283
Chris Wilsonef55f922015-10-09 14:11:27 +01003284/**
3285 * Changes the cache-level of an object across all VMA.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003286 * @obj: object to act on
3287 * @cache_level: new cache level to set for the object
Chris Wilsonef55f922015-10-09 14:11:27 +01003288 *
3289 * After this function returns, the object will be in the new cache-level
3290 * across all GTT and the contents of the backing storage will be coherent,
3291 * with respect to the new cache-level. In order to keep the backing storage
3292 * coherent for all users, we only allow a single cache level to be set
3293 * globally on the object and prevent it from being changed whilst the
3294 * hardware is reading from the object. That is if the object is currently
3295 * on the scanout it will be set to uncached (or equivalent display
3296 * cache coherency) and all non-MOCS GPU access will also be uncached so
3297 * that all direct access to the scanout remains coherent.
3298 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003299int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3300 enum i915_cache_level cache_level)
3301{
Chris Wilsonaa653a62016-08-04 07:52:27 +01003302 struct i915_vma *vma;
Ville Syrjäläed75a552015-08-11 19:47:10 +03003303 int ret = 0;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003304
3305 if (obj->cache_level == cache_level)
Ville Syrjäläed75a552015-08-11 19:47:10 +03003306 goto out;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003307
Chris Wilsonef55f922015-10-09 14:11:27 +01003308 /* Inspect the list of currently bound VMA and unbind any that would
3309 * be invalid given the new cache-level. This is principally to
3310 * catch the issue of the CS prefetch crossing page boundaries and
3311 * reading an invalid PTE on older architectures.
3312 */
Chris Wilsonaa653a62016-08-04 07:52:27 +01003313restart:
3314 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003315 if (!drm_mm_node_allocated(&vma->node))
3316 continue;
3317
Chris Wilson20dfbde2016-08-04 16:32:30 +01003318 if (i915_vma_is_pinned(vma)) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003319 DRM_DEBUG("can not change the cache level of pinned objects\n");
3320 return -EBUSY;
3321 }
3322
Chris Wilsonaa653a62016-08-04 07:52:27 +01003323 if (i915_gem_valid_gtt_space(vma, cache_level))
3324 continue;
3325
3326 ret = i915_vma_unbind(vma);
3327 if (ret)
3328 return ret;
3329
3330 /* As unbinding may affect other elements in the
3331 * obj->vma_list (due to side-effects from retiring
3332 * an active vma), play safe and restart the iterator.
3333 */
3334 goto restart;
Chris Wilson42d6ab42012-07-26 11:49:32 +01003335 }
3336
Chris Wilsonef55f922015-10-09 14:11:27 +01003337 /* We can reuse the existing drm_mm nodes but need to change the
3338 * cache-level on the PTE. We could simply unbind them all and
3339 * rebind with the correct cache-level on next use. However since
3340 * we already have a valid slot, dma mapping, pages etc, we may as
3341 * rewrite the PTE in the belief that doing so tramples upon less
3342 * state and so involves less work.
3343 */
Chris Wilson15717de2016-08-04 07:52:26 +01003344 if (obj->bind_count) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003345 /* Before we change the PTE, the GPU must not be accessing it.
3346 * If we wait upon the object, we know that all the bound
3347 * VMA are no longer active.
3348 */
Chris Wilson2e2f3512015-04-27 13:41:14 +01003349 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003350 if (ret)
3351 return ret;
3352
Chris Wilsonaa653a62016-08-04 07:52:27 +01003353 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003354 /* Access to snoopable pages through the GTT is
3355 * incoherent and on some machines causes a hard
 3356	 * lockup. Relinquish the CPU mmapping to force
3357 * userspace to refault in the pages and we can
3358 * then double check if the GTT mapping is still
3359 * valid for that pointer access.
3360 */
3361 i915_gem_release_mmap(obj);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003362
Chris Wilsonef55f922015-10-09 14:11:27 +01003363 /* As we no longer need a fence for GTT access,
3364 * we can relinquish it now (and so prevent having
3365 * to steal a fence from someone else on the next
3366 * fence request). Note GPU activity would have
3367 * dropped the fence as all snoopable access is
3368 * supposed to be linear.
3369 */
Chris Wilson49ef5292016-08-18 17:17:00 +01003370 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3371 ret = i915_vma_put_fence(vma);
3372 if (ret)
3373 return ret;
3374 }
Chris Wilsonef55f922015-10-09 14:11:27 +01003375 } else {
3376 /* We either have incoherent backing store and
3377 * so no GTT access or the architecture is fully
3378 * coherent. In such cases, existing GTT mmaps
3379 * ignore the cache bit in the PTE and we can
3380 * rewrite it without confusing the GPU or having
3381 * to force userspace to fault back in its mmaps.
3382 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003383 }
3384
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003385 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003386 if (!drm_mm_node_allocated(&vma->node))
3387 continue;
3388
3389 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3390 if (ret)
3391 return ret;
3392 }
Chris Wilsone4ffd172011-04-04 09:44:39 +01003393 }
3394
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003395 list_for_each_entry(vma, &obj->vma_list, obj_link)
Chris Wilson2c225692013-08-09 12:26:45 +01003396 vma->node.color = cache_level;
3397 obj->cache_level = cache_level;
3398
Ville Syrjäläed75a552015-08-11 19:47:10 +03003399out:
Chris Wilsonef55f922015-10-09 14:11:27 +01003400 /* Flush the dirty CPU caches to the backing storage so that the
3401 * object is now coherent at its new cache level (with respect
3402 * to the access domain).
3403 */
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05303404 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
Chris Wilson0f719792015-01-13 13:32:52 +00003405 if (i915_gem_clflush_object(obj, true))
Chris Wilsonc0336662016-05-06 15:40:21 +01003406 i915_gem_chipset_flush(to_i915(obj->base.dev));
Chris Wilsone4ffd172011-04-04 09:44:39 +01003407 }
3408
Chris Wilsone4ffd172011-04-04 09:44:39 +01003409 return 0;
3410}
3411
Ben Widawsky199adf42012-09-21 17:01:20 -07003412int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3413 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003414{
Ben Widawsky199adf42012-09-21 17:01:20 -07003415 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003416 struct drm_i915_gem_object *obj;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003417
Chris Wilson03ac0642016-07-20 13:31:51 +01003418 obj = i915_gem_object_lookup(file, args->handle);
3419 if (!obj)
Chris Wilson432be692015-05-07 12:14:55 +01003420 return -ENOENT;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003421
Chris Wilson651d7942013-08-08 14:41:10 +01003422 switch (obj->cache_level) {
3423 case I915_CACHE_LLC:
3424 case I915_CACHE_L3_LLC:
3425 args->caching = I915_CACHING_CACHED;
3426 break;
3427
Chris Wilson4257d3b2013-08-08 14:41:11 +01003428 case I915_CACHE_WT:
3429 args->caching = I915_CACHING_DISPLAY;
3430 break;
3431
Chris Wilson651d7942013-08-08 14:41:10 +01003432 default:
3433 args->caching = I915_CACHING_NONE;
3434 break;
3435 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003436
Chris Wilson34911fd2016-07-20 13:31:54 +01003437 i915_gem_object_put_unlocked(obj);
Chris Wilson432be692015-05-07 12:14:55 +01003438 return 0;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003439}
3440
Ben Widawsky199adf42012-09-21 17:01:20 -07003441int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3442 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003443{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003444 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawsky199adf42012-09-21 17:01:20 -07003445 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003446 struct drm_i915_gem_object *obj;
3447 enum i915_cache_level level;
3448 int ret;
3449
Ben Widawsky199adf42012-09-21 17:01:20 -07003450 switch (args->caching) {
3451 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003452 level = I915_CACHE_NONE;
3453 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003454 case I915_CACHING_CACHED:
Imre Deake5756c12015-08-14 18:43:30 +03003455 /*
3456 * Due to a HW issue on BXT A stepping, GPU stores via a
3457 * snooped mapping may leave stale data in a corresponding CPU
3458 * cacheline, whereas normally such cachelines would get
3459 * invalidated.
3460 */
Tvrtko Ursulinca377802016-03-02 12:10:31 +00003461 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
Imre Deake5756c12015-08-14 18:43:30 +03003462 return -ENODEV;
3463
Chris Wilsone6994ae2012-07-10 10:27:08 +01003464 level = I915_CACHE_LLC;
3465 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003466 case I915_CACHING_DISPLAY:
3467 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3468 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003469 default:
3470 return -EINVAL;
3471 }
3472
Imre Deakfd0fe6a2015-11-04 21:25:32 +02003473 intel_runtime_pm_get(dev_priv);
3474
Ben Widawsky3bc29132012-09-26 16:15:20 -07003475 ret = i915_mutex_lock_interruptible(dev);
3476 if (ret)
Imre Deakfd0fe6a2015-11-04 21:25:32 +02003477 goto rpm_put;
Ben Widawsky3bc29132012-09-26 16:15:20 -07003478
Chris Wilson03ac0642016-07-20 13:31:51 +01003479 obj = i915_gem_object_lookup(file, args->handle);
3480 if (!obj) {
Chris Wilsone6994ae2012-07-10 10:27:08 +01003481 ret = -ENOENT;
3482 goto unlock;
3483 }
3484
3485 ret = i915_gem_object_set_cache_level(obj, level);
3486
Chris Wilsonf8c417c2016-07-20 13:31:53 +01003487 i915_gem_object_put(obj);
Chris Wilsone6994ae2012-07-10 10:27:08 +01003488unlock:
3489 mutex_unlock(&dev->struct_mutex);
Imre Deakfd0fe6a2015-11-04 21:25:32 +02003490rpm_put:
3491 intel_runtime_pm_put(dev_priv);
3492
Chris Wilsone6994ae2012-07-10 10:27:08 +01003493 return ret;
3494}
3495
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003496/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003497 * Prepare buffer for display plane (scanout, cursors, etc).
3498 * Can be called from an uninterruptible phase (modesetting) and allows
3499 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003500 */
Chris Wilson058d88c2016-08-15 10:49:06 +01003501struct i915_vma *
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003502i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3503 u32 alignment,
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003504 const struct i915_ggtt_view *view)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003505{
Chris Wilson058d88c2016-08-15 10:49:06 +01003506 struct i915_vma *vma;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003507 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003508 int ret;
3509
Chris Wilsoncc98b412013-08-09 12:25:09 +01003510 /* Mark the pin_display early so that we account for the
3511 * display coherency whilst setting up the cache domains.
3512 */
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003513 obj->pin_display++;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003514
Eric Anholta7ef0642011-03-29 16:59:54 -07003515 /* The display engine is not coherent with the LLC cache on gen6. As
3516 * a result, we make sure that the pinning that is about to occur is
3517 * done with uncached PTEs. This is lowest common denominator for all
3518 * chipsets.
3519 *
3520 * However for gen6+, we could do better by using the GFDT bit instead
3521 * of uncaching, which would allow us to flush all the LLC-cached data
3522 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3523 */
Chris Wilson651d7942013-08-08 14:41:10 +01003524 ret = i915_gem_object_set_cache_level(obj,
3525 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Chris Wilson058d88c2016-08-15 10:49:06 +01003526 if (ret) {
3527 vma = ERR_PTR(ret);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003528 goto err_unpin_display;
Chris Wilson058d88c2016-08-15 10:49:06 +01003529 }
Eric Anholta7ef0642011-03-29 16:59:54 -07003530
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003531 /* As the user may map the buffer once pinned in the display plane
3532 * (e.g. libkms for the bootup splash), we have to ensure that we
Chris Wilson2efb8132016-08-18 17:17:06 +01003533 * always use map_and_fenceable for all scanout buffers. However,
3534 * it may simply be too big to fit into mappable, in which case
3535 * put it anyway and hope that userspace can cope (but always first
3536 * try to preserve the existing ABI).
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003537 */
Chris Wilson2efb8132016-08-18 17:17:06 +01003538 vma = ERR_PTR(-ENOSPC);
3539 if (view->type == I915_GGTT_VIEW_NORMAL)
3540 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3541 PIN_MAPPABLE | PIN_NONBLOCK);
3542 if (IS_ERR(vma))
3543 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
Chris Wilson058d88c2016-08-15 10:49:06 +01003544 if (IS_ERR(vma))
Chris Wilsoncc98b412013-08-09 12:25:09 +01003545 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003546
Chris Wilsond8923dc2016-08-18 17:17:07 +01003547 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3548
Chris Wilson058d88c2016-08-15 10:49:06 +01003549 WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
3550
Daniel Vettere62b59e2015-01-21 14:53:48 +01003551 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003552
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003553 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003554 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003555
3556 /* It should now be out of any other write domains, and we can update
3557 * the domain values for our changes.
3558 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003559 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003560 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003561
3562 trace_i915_gem_object_change_domain(obj,
3563 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003564 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003565
Chris Wilson058d88c2016-08-15 10:49:06 +01003566 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003567
3568err_unpin_display:
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003569 obj->pin_display--;
Chris Wilson058d88c2016-08-15 10:49:06 +01003570 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003571}
3572
3573void
Chris Wilson058d88c2016-08-15 10:49:06 +01003574i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003575{
Chris Wilson058d88c2016-08-15 10:49:06 +01003576 if (WARN_ON(vma->obj->pin_display == 0))
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003577 return;
3578
Chris Wilsond8923dc2016-08-18 17:17:07 +01003579 if (--vma->obj->pin_display == 0)
3580 vma->display_alignment = 0;
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003581
Chris Wilson383d5822016-08-18 17:17:08 +01003582 /* Bump the LRU to try and avoid premature eviction whilst flipping */
3583 if (!i915_vma_is_active(vma))
3584 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3585
Chris Wilson058d88c2016-08-15 10:49:06 +01003586 i915_vma_unpin(vma);
3587 WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003588}
3589
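/*
 * Illustrative sketch only, not part of the driver: the intended pairing of
 * the two helpers above as seen from a hypothetical modesetting caller. It
 * assumes the caller already holds the locking required at this point in the
 * driver (struct_mutex); the alignment value and the plane-programming step
 * are placeholders.
 */
static int __maybe_unused example_pin_scanout(struct drm_i915_gem_object *obj,
					      const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	/* Pin for scanout; prefers the mappable aperture but may fall back. */
	vma = i915_gem_object_pin_to_display_plane(obj, 4096, view);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* ... program the plane using i915_ggtt_offset(vma) and flip ... */

	/* Drop the display pin once the buffer is no longer scanned out. */
	i915_gem_object_unpin_from_display_plane(vma);
	return 0;
}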
Eric Anholte47c68e2008-11-14 13:35:19 -08003590/**
3591 * Moves a single object to the CPU read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003592 * @obj: object to act on
3593 * @write: requesting write or read-only access
Eric Anholte47c68e2008-11-14 13:35:19 -08003594 *
3595 * This function returns when the move is complete, including waiting on
3596 * flushes to occur.
3597 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003598int
Chris Wilson919926a2010-11-12 13:42:53 +00003599i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003600{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003601 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003602 int ret;
3603
Chris Wilson0201f1e2012-07-20 12:41:01 +01003604 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003605 if (ret)
3606 return ret;
3607
Chris Wilsonc13d87e2016-07-20 09:21:15 +01003608 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3609 return 0;
3610
Eric Anholte47c68e2008-11-14 13:35:19 -08003611 i915_gem_object_flush_gtt_write_domain(obj);
3612
Chris Wilson05394f32010-11-08 19:18:58 +00003613 old_write_domain = obj->base.write_domain;
3614 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003615
Eric Anholte47c68e2008-11-14 13:35:19 -08003616 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003617 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003618 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003619
Chris Wilson05394f32010-11-08 19:18:58 +00003620 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003621 }
3622
3623 /* It should now be out of any other write domains, and we can update
3624 * the domain values for our changes.
3625 */
Chris Wilson05394f32010-11-08 19:18:58 +00003626 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003627
3628 /* If we're writing through the CPU, then the GPU read domains will
3629 * need to be invalidated at next use.
3630 */
3631 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003632 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3633 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003634 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003635
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003636 trace_i915_gem_object_change_domain(obj,
3637 old_read_domains,
3638 old_write_domain);
3639
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003640 return 0;
3641}
3642
Eric Anholt673a3942008-07-30 12:06:12 -07003643/* Throttle our rendering by waiting until the ring has completed our requests
3644 * emitted over 20 msec ago.
3645 *
Eric Anholtb9624422009-06-03 07:27:35 +00003646 * Note that if we were to use the current jiffies each time around the loop,
3647 * we wouldn't escape the function with any frames outstanding if the time to
3648 * render a frame was over 20ms.
3649 *
Eric Anholt673a3942008-07-30 12:06:12 -07003650 * This should get us reasonable parallelism between CPU and GPU but also
3651 * relatively low latency when blocking on a particular request to finish.
3652 */
3653static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003654i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003655{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003656 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003657 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsond0bc54f2015-05-21 21:01:48 +01003658 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
John Harrison54fb2412014-11-24 18:49:27 +00003659 struct drm_i915_gem_request *request, *target = NULL;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003660 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003661
Daniel Vetter308887a2012-11-14 17:14:06 +01003662 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3663 if (ret)
3664 return ret;
3665
Chris Wilsonf4457ae2016-04-13 17:35:08 +01003666 /* ABI: return -EIO if already wedged */
3667 if (i915_terminally_wedged(&dev_priv->gpu_error))
3668 return -EIO;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003669
Chris Wilson1c255952010-09-26 11:03:27 +01003670 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003671 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003672 if (time_after_eq(request->emitted_jiffies, recent_enough))
3673 break;
3674
John Harrisonfcfa423c2015-05-29 17:44:12 +01003675 /*
 3676 * Note that the request might not have been submitted yet, in
 3677 * which case emitted_jiffies will be zero.
3678 */
3679 if (!request->emitted_jiffies)
3680 continue;
3681
John Harrison54fb2412014-11-24 18:49:27 +00003682 target = request;
Eric Anholtb9624422009-06-03 07:27:35 +00003683 }
John Harrisonff865882014-11-24 18:49:28 +00003684 if (target)
Chris Wilsone8a261e2016-07-20 13:31:49 +01003685 i915_gem_request_get(target);
Chris Wilson1c255952010-09-26 11:03:27 +01003686 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003687
John Harrison54fb2412014-11-24 18:49:27 +00003688 if (target == NULL)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003689 return 0;
3690
Chris Wilson776f3232016-08-04 07:52:40 +01003691 ret = i915_wait_request(target, true, NULL, NULL);
Chris Wilsone8a261e2016-07-20 13:31:49 +01003692 i915_gem_request_put(target);
John Harrisonff865882014-11-24 18:49:28 +00003693
Eric Anholt673a3942008-07-30 12:06:12 -07003694 return ret;
3695}
3696
Chris Wilsond23db882014-05-23 08:48:08 +02003697static bool
Chris Wilson91b2db62016-08-04 16:32:23 +01003698i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
Chris Wilsond23db882014-05-23 08:48:08 +02003699{
Chris Wilson59bfa122016-08-04 16:32:31 +01003700 if (!drm_mm_node_allocated(&vma->node))
3701 return false;
3702
Chris Wilson91b2db62016-08-04 16:32:23 +01003703 if (vma->node.size < size)
3704 return true;
3705
3706 if (alignment && vma->node.start & (alignment - 1))
Chris Wilsond23db882014-05-23 08:48:08 +02003707 return true;
3708
Chris Wilson05a20d02016-08-18 17:16:55 +01003709 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
Chris Wilsond23db882014-05-23 08:48:08 +02003710 return true;
3711
3712 if (flags & PIN_OFFSET_BIAS &&
3713 vma->node.start < (flags & PIN_OFFSET_MASK))
3714 return true;
3715
Chris Wilson506a8e82015-12-08 11:55:07 +00003716 if (flags & PIN_OFFSET_FIXED &&
3717 vma->node.start != (flags & PIN_OFFSET_MASK))
3718 return true;
3719
Chris Wilsond23db882014-05-23 08:48:08 +02003720 return false;
3721}
3722
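/*
 * Worked example for the alignment test above (illustration only): a vma at
 * node.start = 0x00105000 fails a requested 64KiB (0x10000) alignment since
 * 0x00105000 & (0x10000 - 1) = 0x5000 != 0, so the vma is reported as
 * misplaced and callers such as i915_gem_object_ggtt_pin() will unbind and
 * re-pin it.
 */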
Chris Wilsond0710ab2015-11-20 14:16:39 +00003723void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3724{
3725 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilsona9f14812016-08-04 16:32:28 +01003726 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsond0710ab2015-11-20 14:16:39 +00003727 bool mappable, fenceable;
3728 u32 fence_size, fence_alignment;
3729
Chris Wilsona9f14812016-08-04 16:32:28 +01003730 fence_size = i915_gem_get_ggtt_size(dev_priv,
Chris Wilson05a20d02016-08-18 17:16:55 +01003731 vma->size,
Chris Wilson3e510a82016-08-05 10:14:23 +01003732 i915_gem_object_get_tiling(obj));
Chris Wilsona9f14812016-08-04 16:32:28 +01003733 fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
Chris Wilson05a20d02016-08-18 17:16:55 +01003734 vma->size,
Chris Wilson3e510a82016-08-05 10:14:23 +01003735 i915_gem_object_get_tiling(obj),
Chris Wilsonad1a7d22016-08-04 16:32:27 +01003736 true);
Chris Wilsond0710ab2015-11-20 14:16:39 +00003737
3738 fenceable = (vma->node.size == fence_size &&
3739 (vma->node.start & (fence_alignment - 1)) == 0);
3740
3741 mappable = (vma->node.start + fence_size <=
Chris Wilsona9f14812016-08-04 16:32:28 +01003742 dev_priv->ggtt.mappable_end);
Chris Wilsond0710ab2015-11-20 14:16:39 +00003743
Chris Wilson05a20d02016-08-18 17:16:55 +01003744 if (mappable && fenceable)
3745 vma->flags |= I915_VMA_CAN_FENCE;
3746 else
3747 vma->flags &= ~I915_VMA_CAN_FENCE;
Chris Wilsond0710ab2015-11-20 14:16:39 +00003748}
3749
Chris Wilson305bc232016-08-04 16:32:33 +01003750int __i915_vma_do_pin(struct i915_vma *vma,
3751 u64 size, u64 alignment, u64 flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003752{
Chris Wilson305bc232016-08-04 16:32:33 +01003753 unsigned int bound = vma->flags;
Eric Anholt673a3942008-07-30 12:06:12 -07003754 int ret;
3755
Chris Wilson59bfa122016-08-04 16:32:31 +01003756 GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
Chris Wilson3272db52016-08-04 16:32:32 +01003757 GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
Ben Widawsky6e7186a2014-05-06 22:21:36 -07003758
Chris Wilson305bc232016-08-04 16:32:33 +01003759 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
3760 ret = -EBUSY;
3761 goto err;
3762 }
Chris Wilsonc826c442014-10-31 13:53:53 +00003763
Chris Wilsonde895082016-08-04 16:32:34 +01003764 if ((bound & I915_VMA_BIND_MASK) == 0) {
Chris Wilson59bfa122016-08-04 16:32:31 +01003765 ret = i915_vma_insert(vma, size, alignment, flags);
3766 if (ret)
3767 goto err;
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003768 }
3769
Chris Wilson59bfa122016-08-04 16:32:31 +01003770 ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
Chris Wilson3b165252016-08-04 16:32:25 +01003771 if (ret)
Chris Wilson59bfa122016-08-04 16:32:31 +01003772 goto err;
Chris Wilson3b165252016-08-04 16:32:25 +01003773
Chris Wilson3272db52016-08-04 16:32:32 +01003774 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
Chris Wilsond0710ab2015-11-20 14:16:39 +00003775 __i915_vma_set_map_and_fenceable(vma);
Chris Wilsonef79e172014-10-31 13:53:52 +00003776
Chris Wilson3b165252016-08-04 16:32:25 +01003777 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
Eric Anholt673a3942008-07-30 12:06:12 -07003778 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003779
Chris Wilson59bfa122016-08-04 16:32:31 +01003780err:
3781 __i915_vma_unpin(vma);
3782 return ret;
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003783}
3784
Chris Wilson058d88c2016-08-15 10:49:06 +01003785struct i915_vma *
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003786i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3787 const struct i915_ggtt_view *view,
Chris Wilson91b2db62016-08-04 16:32:23 +01003788 u64 size,
Chris Wilson2ffffd02016-08-04 16:32:22 +01003789 u64 alignment,
3790 u64 flags)
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003791{
Chris Wilson058d88c2016-08-15 10:49:06 +01003792 struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
Chris Wilson59bfa122016-08-04 16:32:31 +01003793 struct i915_vma *vma;
3794 int ret;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003795
Chris Wilson058d88c2016-08-15 10:49:06 +01003796 vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
Chris Wilson59bfa122016-08-04 16:32:31 +01003797 if (IS_ERR(vma))
Chris Wilson058d88c2016-08-15 10:49:06 +01003798 return vma;
Chris Wilson59bfa122016-08-04 16:32:31 +01003799
3800 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3801 if (flags & PIN_NONBLOCK &&
3802 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
Chris Wilson058d88c2016-08-15 10:49:06 +01003803 return ERR_PTR(-ENOSPC);
Chris Wilson59bfa122016-08-04 16:32:31 +01003804
3805 WARN(i915_vma_is_pinned(vma),
3806 "bo is already pinned in ggtt with incorrect alignment:"
Chris Wilson05a20d02016-08-18 17:16:55 +01003807 " offset=%08x, req.alignment=%llx,"
3808 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3809 i915_ggtt_offset(vma), alignment,
Chris Wilson59bfa122016-08-04 16:32:31 +01003810 !!(flags & PIN_MAPPABLE),
Chris Wilson05a20d02016-08-18 17:16:55 +01003811 i915_vma_is_map_and_fenceable(vma));
Chris Wilson59bfa122016-08-04 16:32:31 +01003812 ret = i915_vma_unbind(vma);
3813 if (ret)
Chris Wilson058d88c2016-08-15 10:49:06 +01003814 return ERR_PTR(ret);
Chris Wilson59bfa122016-08-04 16:32:31 +01003815 }
3816
Chris Wilson058d88c2016-08-15 10:49:06 +01003817 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3818 if (ret)
3819 return ERR_PTR(ret);
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003820
Chris Wilson058d88c2016-08-15 10:49:06 +01003821 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003822}
3823
Chris Wilsonedf6b762016-08-09 09:23:33 +01003824static __always_inline unsigned int __busy_read_flag(unsigned int id)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003825{
3826 /* Note that we could alias engines in the execbuf API, but
3827 * that would be very unwise as it prevents userspace from
3828 * fine control over engine selection. Ahem.
3829 *
3830 * This should be something like EXEC_MAX_ENGINE instead of
3831 * I915_NUM_ENGINES.
3832 */
3833 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3834 return 0x10000 << id;
3835}
3836
3837static __always_inline unsigned int __busy_write_id(unsigned int id)
3838{
Chris Wilson70cb4722016-08-09 18:08:25 +01003839 /* The uABI guarantees an active writer is also amongst the read
3840 * engines. This would be true if we accessed the activity tracking
3841 * under the lock, but as we perform the lookup of the object and
3842 * its activity locklessly we can not guarantee that the last_write
3843 * being active implies that we have set the same engine flag from
3844 * last_read - hence we always set both read and write busy for
3845 * last_write.
3846 */
3847 return id | __busy_read_flag(id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003848}
3849
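/*
 * Worked example of the encoding above (illustration only): for an engine
 * with exec_id 2, __busy_read_flag(2) = 0x10000 << 2 = 0x40000 and
 * __busy_write_id(2) = 2 | 0x40000 = 0x40002, i.e. the write id always
 * carries the matching read flag, as the comment above requires.
 */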
Chris Wilsonedf6b762016-08-09 09:23:33 +01003850static __always_inline unsigned int
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003851__busy_set_if_active(const struct i915_gem_active *active,
3852 unsigned int (*flag)(unsigned int id))
3853{
Chris Wilson12555012016-08-16 09:50:40 +01003854 struct drm_i915_gem_request *request;
3855
3856 request = rcu_dereference(active->request);
3857 if (!request || i915_gem_request_completed(request))
3858 return 0;
3859
 3860 /* This is racy. See __i915_gem_active_get_rcu() for an in-depth
3861 * discussion of how to handle the race correctly, but for reporting
3862 * the busy state we err on the side of potentially reporting the
3863 * wrong engine as being busy (but we guarantee that the result
3864 * is at least self-consistent).
3865 *
3866 * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
3867 * whilst we are inspecting it, even under the RCU read lock as we are.
3868 * This means that there is a small window for the engine and/or the
3869 * seqno to have been overwritten. The seqno will always be in the
3870 * future compared to the intended, and so we know that if that
3871 * seqno is idle (on whatever engine) our request is idle and the
3872 * return 0 above is correct.
3873 *
3874 * The issue is that if the engine is switched, it is just as likely
3875 * to report that it is busy (but since the switch happened, we know
3876 * the request should be idle). So there is a small chance that a busy
3877 * result is actually the wrong engine.
3878 *
3879 * So why don't we care?
3880 *
3881 * For starters, the busy ioctl is a heuristic that is by definition
3882 * racy. Even with perfect serialisation in the driver, the hardware
3883 * state is constantly advancing - the state we report to the user
3884 * is stale.
3885 *
3886 * The critical information for the busy-ioctl is whether the object
3887 * is idle as userspace relies on that to detect whether its next
3888 * access will stall, or if it has missed submitting commands to
3889 * the hardware allowing the GPU to stall. We never generate a
3890 * false-positive for idleness, thus busy-ioctl is reliable at the
3891 * most fundamental level, and we maintain the guarantee that a
3892 * busy object left to itself will eventually become idle (and stay
3893 * idle!).
3894 *
3895 * We allow ourselves the leeway of potentially misreporting the busy
3896 * state because that is an optimisation heuristic that is constantly
3897 * in flux. Being quickly able to detect the busy/idle state is much
3898 * more important than accurate logging of exactly which engines were
3899 * busy.
3900 *
3901 * For accuracy in reporting the engine, we could use
3902 *
3903 * result = 0;
3904 * request = __i915_gem_active_get_rcu(active);
3905 * if (request) {
3906 * if (!i915_gem_request_completed(request))
3907 * result = flag(request->engine->exec_id);
3908 * i915_gem_request_put(request);
3909 * }
3910 *
3911 * but that still remains susceptible to both hardware and userspace
3912 * races. So we accept making the result of that race slightly worse,
3913 * given the rarity of the race and its low impact on the result.
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003914 */
Chris Wilson12555012016-08-16 09:50:40 +01003915 return flag(READ_ONCE(request->engine->exec_id));
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003916}
3917
Chris Wilsonedf6b762016-08-09 09:23:33 +01003918static __always_inline unsigned int
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003919busy_check_reader(const struct i915_gem_active *active)
3920{
3921 return __busy_set_if_active(active, __busy_read_flag);
3922}
3923
Chris Wilsonedf6b762016-08-09 09:23:33 +01003924static __always_inline unsigned int
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003925busy_check_writer(const struct i915_gem_active *active)
3926{
3927 return __busy_set_if_active(active, __busy_write_id);
3928}
3929
Eric Anholt673a3942008-07-30 12:06:12 -07003930int
Eric Anholt673a3942008-07-30 12:06:12 -07003931i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003932 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003933{
3934 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003935 struct drm_i915_gem_object *obj;
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003936 unsigned long active;
Eric Anholt673a3942008-07-30 12:06:12 -07003937
Chris Wilson03ac0642016-07-20 13:31:51 +01003938 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003939 if (!obj)
3940 return -ENOENT;
Zou Nan haid1b851f2010-05-21 09:08:57 +08003941
Chris Wilson426960b2016-01-15 16:51:46 +00003942 args->busy = 0;
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003943 active = __I915_BO_ACTIVE(obj);
3944 if (active) {
3945 int idx;
Chris Wilson426960b2016-01-15 16:51:46 +00003946
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003947 /* Yes, the lookups are intentionally racy.
3948 *
3949 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
3950 * to regard the value as stale and as our ABI guarantees
3951 * forward progress, we confirm the status of each active
3952 * request with the hardware.
3953 *
3954 * Even though we guard the pointer lookup by RCU, that only
3955 * guarantees that the pointer and its contents remain
 3956 * dereferenceable and does *not* mean that the request we
3957 * have is the same as the one being tracked by the object.
3958 *
3959 * Consider that we lookup the request just as it is being
3960 * retired and freed. We take a local copy of the pointer,
3961 * but before we add its engine into the busy set, the other
3962 * thread reallocates it and assigns it to a task on another
Chris Wilson12555012016-08-16 09:50:40 +01003963 * engine with a fresh and incomplete seqno. Guarding against
3964 * that requires careful serialisation and reference counting,
3965 * i.e. using __i915_gem_active_get_request_rcu(). We don't,
3966 * instead we expect that if the result is busy, which engines
3967 * are busy is not completely reliable - we only guarantee
3968 * that the object was busy.
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003969 */
3970 rcu_read_lock();
3971
3972 for_each_active(active, idx)
3973 args->busy |= busy_check_reader(&obj->last_read[idx]);
3974
3975 /* For ABI sanity, we only care that the write engine is in
Chris Wilson70cb4722016-08-09 18:08:25 +01003976 * the set of read engines. This should be ensured by the
3977 * ordering of setting last_read/last_write in
3978 * i915_vma_move_to_active(), and then in reverse in retire.
3979 * However, for good measure, we always report the last_write
3980 * request as a busy read as well as being a busy write.
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003981 *
3982 * We don't care that the set of active read/write engines
3983 * may change during construction of the result, as it is
3984 * equally liable to change before userspace can inspect
3985 * the result.
3986 */
3987 args->busy |= busy_check_writer(&obj->last_write);
3988
3989 rcu_read_unlock();
Chris Wilson426960b2016-01-15 16:51:46 +00003990 }
Eric Anholt673a3942008-07-30 12:06:12 -07003991
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003992 i915_gem_object_put_unlocked(obj);
3993 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003994}
3995
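/*
 * Illustrative sketch only, not uAPI documentation: given the encoding used
 * by busy_check_reader()/busy_check_writer() above, a caller could split the
 * value reported in args->busy like this. The helper names are hypothetical.
 */
static inline unsigned int example_busy_read_engines(u32 busy)
{
	return busy >> 16; /* one bit per engine exec_id */
}

static inline unsigned int example_busy_write_engine(u32 busy)
{
	return busy & 0xffff; /* exec_id reported for the last writer */
}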
3996int
3997i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3998 struct drm_file *file_priv)
3999{
Akshay Joshi0206e352011-08-16 15:34:10 -04004000 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004001}
4002
Chris Wilson3ef94da2009-09-14 16:50:29 +01004003int
4004i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4005 struct drm_file *file_priv)
4006{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004007 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson3ef94da2009-09-14 16:50:29 +01004008 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004009 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004010 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004011
4012 switch (args->madv) {
4013 case I915_MADV_DONTNEED:
4014 case I915_MADV_WILLNEED:
4015 break;
4016 default:
4017 return -EINVAL;
4018 }
4019
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004020 ret = i915_mutex_lock_interruptible(dev);
4021 if (ret)
4022 return ret;
4023
Chris Wilson03ac0642016-07-20 13:31:51 +01004024 obj = i915_gem_object_lookup(file_priv, args->handle);
4025 if (!obj) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004026 ret = -ENOENT;
4027 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004028 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01004029
Daniel Vetter656bfa32014-11-20 09:26:30 +01004030 if (obj->pages &&
Chris Wilson3e510a82016-08-05 10:14:23 +01004031 i915_gem_object_is_tiled(obj) &&
Daniel Vetter656bfa32014-11-20 09:26:30 +01004032 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4033 if (obj->madv == I915_MADV_WILLNEED)
4034 i915_gem_object_unpin_pages(obj);
4035 if (args->madv == I915_MADV_WILLNEED)
4036 i915_gem_object_pin_pages(obj);
4037 }
4038
Chris Wilson05394f32010-11-08 19:18:58 +00004039 if (obj->madv != __I915_MADV_PURGED)
4040 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004041
Chris Wilson6c085a72012-08-20 11:40:46 +02004042 /* if the object is no longer attached, discard its backing storage */
Daniel Vetterbe6a0372015-03-18 10:46:04 +01004043 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004044 i915_gem_object_truncate(obj);
4045
Chris Wilson05394f32010-11-08 19:18:58 +00004046 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004047
Chris Wilsonf8c417c2016-07-20 13:31:53 +01004048 i915_gem_object_put(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004049unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004050 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004051 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004052}
4053
Chris Wilson37e680a2012-06-07 15:38:42 +01004054void i915_gem_object_init(struct drm_i915_gem_object *obj,
4055 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004056{
Chris Wilsonb4716182015-04-27 13:41:17 +01004057 int i;
4058
Ben Widawsky35c20a62013-05-31 11:28:48 -07004059 INIT_LIST_HEAD(&obj->global_list);
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00004060 for (i = 0; i < I915_NUM_ENGINES; i++)
Chris Wilsonfa545cb2016-08-04 07:52:35 +01004061 init_request_active(&obj->last_read[i],
4062 i915_gem_object_retire__read);
4063 init_request_active(&obj->last_write,
4064 i915_gem_object_retire__write);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02004065 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004066 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson8d9d5742015-04-07 16:20:38 +01004067 INIT_LIST_HEAD(&obj->batch_pool_link);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004068
Chris Wilson37e680a2012-06-07 15:38:42 +01004069 obj->ops = ops;
4070
Chris Wilson50349242016-08-18 17:17:04 +01004071 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
Chris Wilson0327d6b2012-08-11 15:41:06 +01004072 obj->madv = I915_MADV_WILLNEED;
Chris Wilson0327d6b2012-08-11 15:41:06 +01004073
Dave Gordonf19ec8c2016-07-04 11:34:37 +01004074 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004075}
4076
Chris Wilson37e680a2012-06-07 15:38:42 +01004077static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
Chris Wilsonde472662016-01-22 18:32:31 +00004078 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
Chris Wilson37e680a2012-06-07 15:38:42 +01004079 .get_pages = i915_gem_object_get_pages_gtt,
4080 .put_pages = i915_gem_object_put_pages_gtt,
4081};
4082
Dave Gordond37cd8a2016-04-22 19:14:32 +01004083struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004084 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004085{
Daniel Vetterc397b902010-04-09 19:05:07 +00004086 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004087 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004088 gfp_t mask;
Chris Wilsonfe3db792016-04-25 13:32:13 +01004089 int ret;
Daniel Vetterc397b902010-04-09 19:05:07 +00004090
Chris Wilson42dcedd2012-11-15 11:32:30 +00004091 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004092 if (obj == NULL)
Chris Wilsonfe3db792016-04-25 13:32:13 +01004093 return ERR_PTR(-ENOMEM);
Daniel Vetterc397b902010-04-09 19:05:07 +00004094
Chris Wilsonfe3db792016-04-25 13:32:13 +01004095 ret = drm_gem_object_init(dev, &obj->base, size);
4096 if (ret)
4097 goto fail;
Daniel Vetterc397b902010-04-09 19:05:07 +00004098
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004099 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4100 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4101 /* 965gm cannot relocate objects above 4GiB. */
4102 mask &= ~__GFP_HIGHMEM;
4103 mask |= __GFP_DMA32;
4104 }
4105
Al Viro93c76a32015-12-04 23:45:44 -05004106 mapping = obj->base.filp->f_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004107 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004108
Chris Wilson37e680a2012-06-07 15:38:42 +01004109 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004110
Daniel Vetterc397b902010-04-09 19:05:07 +00004111 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4112 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4113
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004114 if (HAS_LLC(dev)) {
4115 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004116 * cache) for about a 10% performance improvement
4117 * compared to uncached. Graphics requests other than
4118 * display scanout are coherent with the CPU in
4119 * accessing this cache. This means in this mode we
4120 * don't need to clflush on the CPU side, and on the
4121 * GPU side we only need to flush internal caches to
4122 * get data visible to the CPU.
4123 *
4124 * However, we maintain the display planes as UC, and so
4125 * need to rebind when first used as such.
4126 */
4127 obj->cache_level = I915_CACHE_LLC;
4128 } else
4129 obj->cache_level = I915_CACHE_NONE;
4130
Daniel Vetterd861e332013-07-24 23:25:03 +02004131 trace_i915_gem_object_create(obj);
4132
Chris Wilson05394f32010-11-08 19:18:58 +00004133 return obj;
Chris Wilsonfe3db792016-04-25 13:32:13 +01004134
4135fail:
4136 i915_gem_object_free(obj);
4137
4138 return ERR_PTR(ret);
Daniel Vetterac52bc52010-04-09 19:05:06 +00004139}
4140
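/*
 * Minimal usage sketch (illustration only, not driver code): the creation
 * helper above reports failure through ERR_PTR() rather than NULL, so a
 * caller checks it with IS_ERR(). The 4096-byte size is an arbitrary example.
 */
static int __maybe_unused example_create_object(struct drm_device *dev)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create(dev, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* ... use the object ... */

	i915_gem_object_put_unlocked(obj);
	return 0;
}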
Chris Wilson340fbd82014-05-22 09:16:52 +01004141static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4142{
4143 /* If we are the last user of the backing storage (be it shmemfs
4144 * pages or stolen etc), we know that the pages are going to be
4145 * immediately released. In this case, we can then skip copying
4146 * back the contents from the GPU.
4147 */
4148
4149 if (obj->madv != I915_MADV_WILLNEED)
4150 return false;
4151
4152 if (obj->base.filp == NULL)
4153 return true;
4154
4155 /* At first glance, this looks racy, but then again so would be
4156 * userspace racing mmap against close. However, the first external
4157 * reference to the filp can only be obtained through the
4158 * i915_gem_mmap_ioctl() which safeguards us against the user
4159 * acquiring such a reference whilst we are in the middle of
4160 * freeing the object.
4161 */
4162 return atomic_long_read(&obj->base.filp->f_count) == 1;
4163}
4164
Chris Wilson1488fc02012-04-24 15:47:31 +01004165void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004166{
Chris Wilson1488fc02012-04-24 15:47:31 +01004167 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004168 struct drm_device *dev = obj->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004169 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004170 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004171
Paulo Zanonif65c9162013-11-27 18:20:34 -02004172 intel_runtime_pm_get(dev_priv);
4173
Chris Wilson26e12f892011-03-20 11:20:19 +00004174 trace_i915_gem_object_destroy(obj);
4175
Chris Wilsonb1f788c2016-08-04 07:52:45 +01004176 /* All file-owned VMA should have been released by this point through
4177 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4178 * However, the object may also be bound into the global GTT (e.g.
4179 * older GPUs without per-process support, or for direct access through
4180 * the GTT either for the user or for scanout). Those VMA still need to
4181 * unbound now.
4182 */
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00004183 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
Chris Wilson3272db52016-08-04 16:32:32 +01004184 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
Chris Wilsonb1f788c2016-08-04 07:52:45 +01004185 GEM_BUG_ON(i915_vma_is_active(vma));
Chris Wilson3272db52016-08-04 16:32:32 +01004186 vma->flags &= ~I915_VMA_PIN_MASK;
Chris Wilsonb1f788c2016-08-04 07:52:45 +01004187 i915_vma_close(vma);
Chris Wilson1488fc02012-04-24 15:47:31 +01004188 }
Chris Wilson15717de2016-08-04 07:52:26 +01004189 GEM_BUG_ON(obj->bind_count);
Chris Wilson1488fc02012-04-24 15:47:31 +01004190
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004191 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4192 * before progressing. */
4193 if (obj->stolen)
4194 i915_gem_object_unpin_pages(obj);
4195
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004196 WARN_ON(atomic_read(&obj->frontbuffer_bits));
Daniel Vettera071fa02014-06-18 23:28:09 +02004197
Daniel Vetter656bfa32014-11-20 09:26:30 +01004198 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4199 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
Chris Wilson3e510a82016-08-05 10:14:23 +01004200 i915_gem_object_is_tiled(obj))
Daniel Vetter656bfa32014-11-20 09:26:30 +01004201 i915_gem_object_unpin_pages(obj);
4202
Ben Widawsky401c29f2013-05-31 11:28:47 -07004203 if (WARN_ON(obj->pages_pin_count))
4204 obj->pages_pin_count = 0;
Chris Wilson340fbd82014-05-22 09:16:52 +01004205 if (discard_backing_storage(obj))
Chris Wilson55372522014-03-25 13:23:06 +00004206 obj->madv = I915_MADV_DONTNEED;
Chris Wilson37e680a2012-06-07 15:38:42 +01004207 i915_gem_object_put_pages(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004208
Chris Wilson9da3da62012-06-01 15:20:22 +01004209 BUG_ON(obj->pages);
4210
Chris Wilson2f745ad2012-09-04 21:02:58 +01004211 if (obj->base.import_attach)
4212 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004213
Chris Wilson5cc9ed42014-05-16 14:22:37 +01004214 if (obj->ops->release)
4215 obj->ops->release(obj);
4216
Chris Wilson05394f32010-11-08 19:18:58 +00004217 drm_gem_object_release(&obj->base);
4218 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004219
Chris Wilson05394f32010-11-08 19:18:58 +00004220 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004221 i915_gem_object_free(obj);
Paulo Zanonif65c9162013-11-27 18:20:34 -02004222
4223 intel_runtime_pm_put(dev_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +01004224}
4225
Chris Wilsondcff85c2016-08-05 10:14:11 +01004226int i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004227{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004228 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsondcff85c2016-08-05 10:14:11 +01004229 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004230
Chris Wilson54b4f682016-07-21 21:16:19 +01004231 intel_suspend_gt_powersave(dev_priv);
4232
Chris Wilson45c5f202013-10-16 11:50:01 +01004233 mutex_lock(&dev->struct_mutex);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004234
4235 /* We have to flush all the executing contexts to main memory so
 4236 * that they can be saved in the hibernation image. To ensure the last
4237 * context image is coherent, we have to switch away from it. That
4238 * leaves the dev_priv->kernel_context still active when
4239 * we actually suspend, and its image in memory may not match the GPU
4240 * state. Fortunately, the kernel_context is disposable and we do
4241 * not rely on its state.
4242 */
4243 ret = i915_gem_switch_to_kernel_context(dev_priv);
4244 if (ret)
4245 goto err;
4246
Chris Wilsondcff85c2016-08-05 10:14:11 +01004247 ret = i915_gem_wait_for_idle(dev_priv, true);
Chris Wilsonf7403342013-09-13 23:57:04 +01004248 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004249 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004250
Chris Wilsonc0336662016-05-06 15:40:21 +01004251 i915_gem_retire_requests(dev_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004252
Chris Wilsonb2e862d2016-04-28 09:56:41 +01004253 i915_gem_context_lost(dev_priv);
Chris Wilson45c5f202013-10-16 11:50:01 +01004254 mutex_unlock(&dev->struct_mutex);
4255
Chris Wilson737b1502015-01-26 18:03:03 +02004256 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
Chris Wilson67d97da2016-07-04 08:08:31 +01004257 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4258 flush_delayed_work(&dev_priv->gt.idle_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004259
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004260 /* Assert that we successfully flushed all the work and
4261 * reset the GPU back to its idle, low power state.
4262 */
Chris Wilson67d97da2016-07-04 08:08:31 +01004263 WARN_ON(dev_priv->gt.awake);
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004264
Eric Anholt673a3942008-07-30 12:06:12 -07004265 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004266
4267err:
4268 mutex_unlock(&dev->struct_mutex);
4269 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004270}
4271
Chris Wilson5ab57c72016-07-15 14:56:20 +01004272void i915_gem_resume(struct drm_device *dev)
4273{
4274 struct drm_i915_private *dev_priv = to_i915(dev);
4275
4276 mutex_lock(&dev->struct_mutex);
4277 i915_gem_restore_gtt_mappings(dev);
4278
4279 /* As we didn't flush the kernel context before suspend, we cannot
4280 * guarantee that the context image is complete. So let's just reset
4281 * it and start again.
4282 */
4283 if (i915.enable_execlists)
4284 intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
4285
4286 mutex_unlock(&dev->struct_mutex);
4287}
4288
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004289void i915_gem_init_swizzling(struct drm_device *dev)
4290{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004291 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004292
Daniel Vetter11782b02012-01-31 16:47:55 +01004293 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004294 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4295 return;
4296
4297 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4298 DISP_TILE_SURFACE_SWIZZLING);
4299
Daniel Vetter11782b02012-01-31 16:47:55 +01004300 if (IS_GEN5(dev))
4301 return;
4302
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004303 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4304 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004305 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004306 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004307 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky31a53362013-11-02 21:07:04 -07004308 else if (IS_GEN8(dev))
4309 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004310 else
4311 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004312}
Daniel Vettere21af882012-02-09 20:53:27 +01004313
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004314static void init_unused_ring(struct drm_device *dev, u32 base)
4315{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004316 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004317
4318 I915_WRITE(RING_CTL(base), 0);
4319 I915_WRITE(RING_HEAD(base), 0);
4320 I915_WRITE(RING_TAIL(base), 0);
4321 I915_WRITE(RING_START(base), 0);
4322}
4323
4324static void init_unused_rings(struct drm_device *dev)
4325{
4326 if (IS_I830(dev)) {
4327 init_unused_ring(dev, PRB1_BASE);
4328 init_unused_ring(dev, SRB0_BASE);
4329 init_unused_ring(dev, SRB1_BASE);
4330 init_unused_ring(dev, SRB2_BASE);
4331 init_unused_ring(dev, SRB3_BASE);
4332 } else if (IS_GEN2(dev)) {
4333 init_unused_ring(dev, SRB0_BASE);
4334 init_unused_ring(dev, SRB1_BASE);
4335 } else if (IS_GEN3(dev)) {
4336 init_unused_ring(dev, PRB1_BASE);
4337 init_unused_ring(dev, PRB2_BASE);
4338 }
4339}
4340
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004341int
4342i915_gem_init_hw(struct drm_device *dev)
4343{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004344 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004345 struct intel_engine_cs *engine;
Chris Wilsond200cda2016-04-28 09:56:44 +01004346 int ret;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004347
Chris Wilson5e4f5182015-02-13 14:35:59 +00004348 /* Double layer security blanket, see i915_gem_init() */
4349 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4350
Mika Kuoppala3accaf72016-04-13 17:26:43 +03004351 if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004352 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004353
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004354 if (IS_HASWELL(dev))
4355 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4356 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004357
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004358 if (HAS_PCH_NOP(dev)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004359 if (IS_IVYBRIDGE(dev)) {
4360 u32 temp = I915_READ(GEN7_MSG_CTL);
4361 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4362 I915_WRITE(GEN7_MSG_CTL, temp);
4363 } else if (INTEL_INFO(dev)->gen >= 7) {
4364 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4365 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4366 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4367 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004368 }
4369
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004370 i915_gem_init_swizzling(dev);
4371
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01004372 /*
4373 * At least 830 can leave some of the unused rings
 4374 * "active" (i.e. head != tail) after resume, which
 4375 * will prevent C3 entry. Make sure all unused rings
4376 * are totally idle.
4377 */
4378 init_unused_rings(dev);
4379
Dave Gordoned54c1a2016-01-19 19:02:54 +00004380 BUG_ON(!dev_priv->kernel_context);
John Harrison90638cc2015-05-29 17:43:37 +01004381
John Harrison4ad2fd82015-06-18 13:11:20 +01004382 ret = i915_ppgtt_init_hw(dev);
4383 if (ret) {
4384 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4385 goto out;
4386 }
4387
4388 /* Need to do basic initialisation of all rings first: */
Dave Gordonb4ac5af2016-03-24 11:20:38 +00004389 for_each_engine(engine, dev_priv) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004390 ret = engine->init_hw(engine);
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004391 if (ret)
Chris Wilson5e4f5182015-02-13 14:35:59 +00004392 goto out;
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004393 }
Mika Kuoppala99433932013-01-22 14:12:17 +02004394
Peter Antoine0ccdacf2016-04-13 15:03:25 +01004395 intel_mocs_init_l3cc_table(dev);
4396
Alex Dai33a732f2015-08-12 15:43:36 +01004397 /* We can't enable contexts until all firmware is loaded */
Dave Gordone556f7c2016-06-07 09:14:49 +01004398 ret = intel_guc_setup(dev);
4399 if (ret)
4400 goto out;
Alex Dai33a732f2015-08-12 15:43:36 +01004401
Chris Wilson5e4f5182015-02-13 14:35:59 +00004402out:
4403 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004404 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004405}
4406
Chris Wilson39df9192016-07-20 13:31:57 +01004407bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4408{
4409 if (INTEL_INFO(dev_priv)->gen < 6)
4410 return false;
4411
4412 /* TODO: make semaphores and Execlists play nicely together */
4413 if (i915.enable_execlists)
4414 return false;
4415
4416 if (value >= 0)
4417 return value;
4418
4419#ifdef CONFIG_INTEL_IOMMU
4420 /* Enable semaphores on SNB when IO remapping is off */
4421 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4422 return false;
4423#endif
4424
4425 return true;
4426}
4427
Chris Wilson1070a422012-04-24 15:47:41 +01004428int i915_gem_init(struct drm_device *dev)
4429{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004430 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson1070a422012-04-24 15:47:41 +01004431 int ret;
4432
Chris Wilson1070a422012-04-24 15:47:41 +01004433 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004434
Oscar Mateoa83014d2014-07-24 17:04:21 +01004435 if (!i915.enable_execlists) {
Chris Wilson7e37f882016-08-02 22:50:21 +01004436 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
Oscar Mateo454afeb2014-07-24 17:04:22 +01004437 } else {
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004438 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
Oscar Mateoa83014d2014-07-24 17:04:21 +01004439 }
4440
Chris Wilson5e4f5182015-02-13 14:35:59 +00004441 /* This is just a security blanket to placate dragons.
4442 * On some systems, we very sporadically observe that the first TLBs
4443 * used by the CS may be stale, despite us poking the TLB reset. If
4444 * we hold the forcewake during initialisation these problems
4445 * just magically go away.
4446 */
4447 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4448
Chris Wilson72778cb2016-05-19 16:17:16 +01004449 i915_gem_init_userptr(dev_priv);
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01004450
4451 ret = i915_gem_init_ggtt(dev_priv);
4452 if (ret)
4453 goto out_unlock;
Jesse Barnesd62b4892013-03-08 10:45:53 -08004454
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004455 ret = i915_gem_context_init(dev);
Jani Nikula7bcc3772014-12-05 14:17:42 +02004456 if (ret)
4457 goto out_unlock;
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004458
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01004459 ret = intel_engines_init(dev);
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004460 if (ret)
Jani Nikula7bcc3772014-12-05 14:17:42 +02004461 goto out_unlock;
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004462
4463 ret = i915_gem_init_hw(dev);
Chris Wilson60990322014-04-09 09:19:42 +01004464 if (ret == -EIO) {
Chris Wilson7e21d642016-07-27 09:07:29 +01004465 /* Allow engine initialisation to fail by marking the GPU as
Chris Wilson60990322014-04-09 09:19:42 +01004466 * wedged. But we only want to do this where the GPU is angry;
 4467 * for all other failures, such as an allocation failure, bail.
4468 */
4469 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
Peter Zijlstra805de8f42015-04-24 01:12:32 +02004470 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
Chris Wilson60990322014-04-09 09:19:42 +01004471 ret = 0;
Chris Wilson1070a422012-04-24 15:47:41 +01004472 }
Jani Nikula7bcc3772014-12-05 14:17:42 +02004473
4474out_unlock:
Chris Wilson5e4f5182015-02-13 14:35:59 +00004475 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Chris Wilson60990322014-04-09 09:19:42 +01004476 mutex_unlock(&dev->struct_mutex);
Chris Wilson1070a422012-04-24 15:47:41 +01004477
Chris Wilson60990322014-04-09 09:19:42 +01004478 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01004479}
4480
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004481void
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004482i915_gem_cleanup_engines(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004483{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004484 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004485 struct intel_engine_cs *engine;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004486
Dave Gordonb4ac5af2016-03-24 11:20:38 +00004487 for_each_engine(engine, dev_priv)
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004488 dev_priv->gt.cleanup_engine(engine);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004489}
4490
Chris Wilson64193402010-10-24 12:38:05 +01004491static void
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00004492init_engine_lists(struct intel_engine_cs *engine)
Chris Wilson64193402010-10-24 12:38:05 +01004493{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00004494 INIT_LIST_HEAD(&engine->request_list);
Chris Wilson64193402010-10-24 12:38:05 +01004495}
4496
Eric Anholt673a3942008-07-30 12:06:12 -07004497void
Imre Deak40ae4e12016-03-16 14:54:03 +02004498i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4499{
Chris Wilson91c8a322016-07-05 10:40:23 +01004500 struct drm_device *dev = &dev_priv->drm;
Chris Wilson49ef5292016-08-18 17:17:00 +01004501 int i;
Imre Deak40ae4e12016-03-16 14:54:03 +02004502
4503 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4504 !IS_CHERRYVIEW(dev_priv))
4505 dev_priv->num_fence_regs = 32;
4506 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4507 IS_I945GM(dev_priv) || IS_G33(dev_priv))
4508 dev_priv->num_fence_regs = 16;
4509 else
4510 dev_priv->num_fence_regs = 8;
4511
Chris Wilsonc0336662016-05-06 15:40:21 +01004512 if (intel_vgpu_active(dev_priv))
Imre Deak40ae4e12016-03-16 14:54:03 +02004513 dev_priv->num_fence_regs =
4514 I915_READ(vgtif_reg(avail_rs.fence_num));
4515
4516 /* Initialize fence registers to zero */
Chris Wilson49ef5292016-08-18 17:17:00 +01004517 for (i = 0; i < dev_priv->num_fence_regs; i++) {
4518 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4519
4520 fence->i915 = dev_priv;
4521 fence->id = i;
4522 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4523 }
Imre Deak40ae4e12016-03-16 14:54:03 +02004524 i915_gem_restore_fences(dev);
4525
4526 i915_gem_detect_bit_6_swizzle(dev);
4527}
4528
4529void
Imre Deakd64aa092016-01-19 15:26:29 +02004530i915_gem_load_init(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004531{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004532 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004533 int i;
4534
Chris Wilsonefab6d82015-04-07 16:20:57 +01004535 dev_priv->objects =
Chris Wilson42dcedd2012-11-15 11:32:30 +00004536 kmem_cache_create("i915_gem_object",
4537 sizeof(struct drm_i915_gem_object), 0,
4538 SLAB_HWCACHE_ALIGN,
4539 NULL);
Chris Wilsone20d2ab2015-04-07 16:20:58 +01004540 dev_priv->vmas =
4541 kmem_cache_create("i915_gem_vma",
4542 sizeof(struct i915_vma), 0,
4543 SLAB_HWCACHE_ALIGN,
4544 NULL);
Chris Wilsonefab6d82015-04-07 16:20:57 +01004545 dev_priv->requests =
4546 kmem_cache_create("i915_gem_request",
4547 sizeof(struct drm_i915_gem_request), 0,
Chris Wilson0eafec62016-08-04 16:32:41 +01004548 SLAB_HWCACHE_ALIGN |
4549 SLAB_RECLAIM_ACCOUNT |
4550 SLAB_DESTROY_BY_RCU,
Chris Wilsonefab6d82015-04-07 16:20:57 +01004551 NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07004552
Ben Widawskya33afea2013-09-17 21:12:45 -07004553 INIT_LIST_HEAD(&dev_priv->context_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02004554 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4555 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004556 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00004557 for (i = 0; i < I915_NUM_ENGINES; i++)
4558 init_engine_lists(&dev_priv->engine[i]);
Chris Wilson67d97da2016-07-04 08:08:31 +01004559 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
Eric Anholt673a3942008-07-30 12:06:12 -07004560 i915_gem_retire_work_handler);
Chris Wilson67d97da2016-07-04 08:08:31 +01004561 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004562 i915_gem_idle_work_handler);
Chris Wilson1f15b762016-07-01 17:23:14 +01004563 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004564 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01004565
Chris Wilson72bfa192010-12-19 11:42:05 +00004566 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4567
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004568 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01004569
Chris Wilsonce453d82011-02-21 14:43:56 +00004570 dev_priv->mm.interruptible = true;
4571
Chris Wilsonb5add952016-08-04 16:32:36 +01004572 spin_lock_init(&dev_priv->fb_tracking.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004573}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004574
Imre Deakd64aa092016-01-19 15:26:29 +02004575void i915_gem_load_cleanup(struct drm_device *dev)
4576{
4577 struct drm_i915_private *dev_priv = to_i915(dev);
4578
4579 kmem_cache_destroy(dev_priv->requests);
4580 kmem_cache_destroy(dev_priv->vmas);
4581 kmem_cache_destroy(dev_priv->objects);
Chris Wilson0eafec62016-08-04 16:32:41 +01004582
4583 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
4584 rcu_barrier();
Imre Deakd64aa092016-01-19 15:26:29 +02004585}
4586
Chris Wilson461fb992016-05-14 07:26:33 +01004587int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4588{
4589 struct drm_i915_gem_object *obj;
4590
4591 /* Called just before we write the hibernation image.
4592 *
4593 * We need to update the domain tracking to reflect that the CPU
 4594 * will be accessing all the pages to create the hibernation image
 4595 * and restore from it, and so upon restoration those pages will be in the
4596 * CPU domain.
4597 *
4598 * To make sure the hibernation image contains the latest state,
4599 * we update that state just before writing out the image.
4600 */
4601
4602 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
4603 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4604 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4605 }
4606
4607 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4608 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4609 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4610 }
4611
4612 return 0;
4613}
4614
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004615void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004616{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004617 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilson15f7bbc2016-07-26 12:01:52 +01004618 struct drm_i915_gem_request *request;
Eric Anholtb9624422009-06-03 07:27:35 +00004619
4620 /* Clean up our request list when the client is going away, so that
4621 * later retire_requests won't dereference our soon-to-be-gone
4622 * file_priv.
4623 */
Chris Wilson1c255952010-09-26 11:03:27 +01004624 spin_lock(&file_priv->mm.lock);
Chris Wilson15f7bbc2016-07-26 12:01:52 +01004625 list_for_each_entry(request, &file_priv->mm.request_list, client_list)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004626 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01004627 spin_unlock(&file_priv->mm.lock);
Chris Wilson31169712009-09-14 16:50:28 +01004628
Chris Wilson2e1b8732015-04-27 13:41:22 +01004629 if (!list_empty(&file_priv->rps.link)) {
Chris Wilson8d3afd72015-05-21 21:01:47 +01004630 spin_lock(&to_i915(dev)->rps.client_lock);
Chris Wilson2e1b8732015-04-27 13:41:22 +01004631 list_del(&file_priv->rps.link);
Chris Wilson8d3afd72015-05-21 21:01:47 +01004632 spin_unlock(&to_i915(dev)->rps.client_lock);
Chris Wilson1854d5c2015-04-07 16:20:32 +01004633 }
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004634}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask is guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}
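
/*
 * Illustrative sketch only, not driver code: a caller flipping the primary
 * plane of a pipe from one framebuffer to another would transfer the
 * corresponding frontbuffer bit like this. The helper name is hypothetical
 * and the INTEL_FRONTBUFFER_PRIMARY() bit macro is assumed; in the driver
 * the call is made from the plane/framebuffer update paths.
 */
#if 0
static void example_swap_primary_fb(struct drm_i915_gem_object *old_obj,
				    struct drm_i915_gem_object *new_obj,
				    enum pipe pipe)
{
	/* Clear the bit on the outgoing buffer, set it on the incoming one */
	i915_gem_track_fb(old_obj, new_obj,
			  INTEL_FRONTBUFFER_PRIMARY(pipe));
}
#endif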

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}
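
/*
 * Illustrative sketch only, not driver code: a typical user of the helper
 * above maps the returned page, writes into it through the CPU, and relies
 * on the page already having been marked dirty so the shmem backing store
 * gets written back. The object's pages are assumed to already be pinned
 * by the caller; function and parameter names are hypothetical.
 */
#if 0
static int example_poke_byte(struct drm_i915_gem_object *obj,
			     unsigned long offset, u8 value)
{
	struct page *page;
	void *vaddr;

	page = i915_gem_object_get_dirty_page(obj, offset >> PAGE_SHIFT);
	if (!page)
		return -EINVAL;

	vaddr = kmap_atomic(page);
	((u8 *)vaddr)[offset_in_page(offset)] = value;
	kunmap_atomic(vaddr);

	return 0;
}
#endif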

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}
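
/*
 * Illustrative sketch only, not driver code: the helper above is intended
 * for wrapping small blobs (e.g. firmware images) in a GEM object. A
 * hypothetical caller pairing it with the firmware loader could look like
 * this; request_firmware()/release_firmware() are standard kernel API,
 * everything else is made up for the example.
 */
#if 0
static struct drm_i915_gem_object *
example_load_blob(struct drm_device *dev, const char *name)
{
	const struct firmware *fw;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = request_firmware(&fw, name, dev->dev);
	if (ret)
		return ERR_PTR(ret);

	/* Copy the blob into a freshly allocated, CPU-coherent GEM object */
	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);

	release_firmware(fw);
	return obj;
}
#endif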