/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}

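/* A CPU write needs a clflush unless the object is coherent with the CPU
 * cache or is already being tracked in the CPU write domain; pinned display
 * (scanout) buffers are treated as needing the flush as well.
 */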
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

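/* Reserve a node in the CPU-mappable range of the global GTT; used as a
 * fallback aperture window when an object cannot be pinned into the
 * mappable region as a whole.
 */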
static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
						   size, 0, -1,
						   0, ggtt->mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

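/* Acquire struct_mutex interruptibly, first giving a GPU reset in progress
 * a chance to complete.
 */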
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

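/* Report the total size of the global GTT and how much of it is left after
 * subtracting the space consumed by pinned (unevictable) vma.
 */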
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return ERR_CAST(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	return st;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		i915_gem_clflush_object(obj, false);

	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

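/* Unbind every vma belonging to the object, after waiting for outstanding
 * rendering; any vma that fails to unbind is returned to the object's list.
 */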
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

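/* Wait upon a single fence, applying a waitboost on behalf of the client
 * (if any) when the fence is an unsignaled i915 request.
 */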
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps)
{
	struct drm_i915_gem_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
		else
			rps = NULL;
	}

	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

	if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&rq->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&rq->i915->rps.client_lock);
	}

	return timeout;
}

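/* Wait upon the fences in the object's reservation object: all shared
 * fences plus the exclusive fence if I915_WAIT_ALL, otherwise just the
 * exclusive fence.
 */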
static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps);
			if (timeout <= 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout > 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);

	dma_fence_put(excl);

	return timeout;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps)
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps);
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
			return -EBUSY;

		return 0;
	}

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	if (obj->mm.pages)
		return -EBUSY;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_pin_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(to_i915(dev));

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

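/* Counterpart to i915_gem_obj_prepare_shmem_read() for CPU writes: waits
 * for all GPU access, pins the backing pages and tells the caller which
 * clflushes (CLFLUSH_BEFORE and/or CLFLUSH_AFTER) are required.
 */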
int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int offset, int length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
	else
		ret = __copy_to_user(user_data, vaddr + offset, length);
	kunmap(page);

	return ret ? - EFAULT : 0;
}

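/* Per-page copy function for the shmem pread fastpath: try an atomic kmap
 * copy first and fall back to the sleeping slow path when that fails or
 * bit17 swizzling is in use.
 */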
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

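/* Copy the object's contents to userspace through its shmem backing pages,
 * one page at a time, clflushing ranges that are not coherent with the CPU
 * cache before reading them.
 */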
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

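/* Copy from an io mapping of the aperture to userspace, using the atomic
 * (non-sleeping) mapping first and falling back to a full mapping plus
 * copy_to_user() if the atomic copy faults.
 */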
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data, vaddr + offset, length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

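/* Fallback pread path through the GGTT aperture: pin the object into the
 * mappable region, or page it through a temporary GTT node when that is
 * not possible, and read via a write-combined CPU mapping.
 */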
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user(vaddr + offset, user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->mappable, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

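/* Slow shmem pwrite path: uses the sleeping kmap()/__copy_from_user()
 * variants, handles bit17 swizzling, and flushes cachelines before and/or
 * after the copy as requested.
 */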
static int
shmem_pwrite_slow(struct page *page, int offset, int length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
	else
		ret = __copy_from_user(vaddr + offset, user_data, length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

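/* Write userspace data into the object through its shmem backing pages,
 * one page at a time, performing the clflushes reported by
 * i915_gem_obj_prepare_shmem_write().
 */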
1282static int
1283i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1284 const struct drm_i915_gem_pwrite *args)
1285{
1286 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1287 void __user *user_data;
1288 u64 remain;
1289 unsigned int obj_do_bit17_swizzling;
1290 unsigned int partial_cacheline_write;
1291 unsigned int needs_clflush;
1292 unsigned int offset, idx;
1293 int ret;
1294
1295 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
Chris Wilson43394c72016-08-18 17:16:47 +01001296 if (ret)
1297 return ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001298
Chris Wilsonfe115622016-10-28 13:58:40 +01001299 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1300 mutex_unlock(&i915->drm.struct_mutex);
1301 if (ret)
1302 return ret;
1303
1304 obj_do_bit17_swizzling = 0;
1305 if (i915_gem_object_needs_bit17_swizzle(obj))
1306 obj_do_bit17_swizzling = BIT(17);
1307
1308 /* If we don't overwrite a cacheline completely we need to be
1309 * careful to have up-to-date data by first clflushing. Don't
1310 * overcomplicate things and flush the entire write range.
1311 */
1312 partial_cacheline_write = 0;
1313 if (needs_clflush & CLFLUSH_BEFORE)
1314 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1315
Chris Wilson43394c72016-08-18 17:16:47 +01001316 user_data = u64_to_user_ptr(args->data_ptr);
Chris Wilson43394c72016-08-18 17:16:47 +01001317 remain = args->size;
Chris Wilsonfe115622016-10-28 13:58:40 +01001318 offset = offset_in_page(args->offset);
1319 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1320 struct page *page = i915_gem_object_get_page(obj, idx);
1321 int length;
Eric Anholt40123c12009-03-09 13:42:30 -07001322
Chris Wilsonfe115622016-10-28 13:58:40 +01001323 length = remain;
1324 if (offset + length > PAGE_SIZE)
1325 length = PAGE_SIZE - offset;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001326
Chris Wilsonfe115622016-10-28 13:58:40 +01001327 ret = shmem_pwrite(page, offset, length, user_data,
1328 page_to_phys(page) & obj_do_bit17_swizzling,
1329 (offset | length) & partial_cacheline_write,
1330 needs_clflush & CLFLUSH_AFTER);
1331 if (ret)
Chris Wilson9da3da62012-06-01 15:20:22 +01001332 break;
1333
Chris Wilsonfe115622016-10-28 13:58:40 +01001334 remain -= length;
1335 user_data += length;
1336 offset = 0;
Eric Anholt40123c12009-03-09 13:42:30 -07001337 }
1338
Rodrigo Vivide152b62015-07-07 16:28:51 -07001339 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Chris Wilsonfe115622016-10-28 13:58:40 +01001340 i915_gem_obj_finish_shmem_access(obj);
Eric Anholt40123c12009-03-09 13:42:30 -07001341 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001342}
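/*
 * A quick worked example of the partial_cacheline_write test above (values
 * assume boot_cpu_data.x86_clflush_size == 64, so the mask is 63):
 *
 *	offset = 128, length = 3968  ->  (128 | 3968) & 63 == 0
 *		both ends are cacheline aligned, no clflush is needed before
 *		the copy.
 *
 *	offset = 100, length = 200   ->  (100 | 200) & 63 != 0
 *		the write starts and/or ends mid-cacheline, so the stale
 *		lines are clflushed first to avoid mixing old and new data
 *		within one cacheline.
 */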
1343
1344/**
1345 * Writes data to the object referenced by handle.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001346 * @dev: drm device
1347 * @data: ioctl data blob
1348 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001349 *
1350 * On error, the contents of the buffer that were to be modified are undefined.
1351 */
1352int
1353i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001354 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001355{
1356 struct drm_i915_gem_pwrite *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001357 struct drm_i915_gem_object *obj;
Chris Wilson51311d02010-11-17 09:10:42 +00001358 int ret;
1359
1360 if (args->size == 0)
1361 return 0;
1362
1363 if (!access_ok(VERIFY_READ,
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001364 u64_to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +00001365 args->size))
1366 return -EFAULT;
1367
Chris Wilson03ac0642016-07-20 13:31:51 +01001368 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001369 if (!obj)
1370 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001371
Chris Wilson7dcd2492010-09-26 20:21:44 +01001372 /* Bounds check destination. */
Chris Wilson05394f32010-11-08 19:18:58 +00001373 if (args->offset > obj->base.size ||
1374 args->size > obj->base.size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +01001375 ret = -EINVAL;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001376 goto err;
Chris Wilsonce9d4192010-09-26 20:50:05 +01001377 }
1378
Chris Wilsondb53a302011-02-03 11:57:46 +00001379 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1380
Chris Wilsone95433c2016-10-28 13:58:27 +01001381 ret = i915_gem_object_wait(obj,
1382 I915_WAIT_INTERRUPTIBLE |
1383 I915_WAIT_ALL,
1384 MAX_SCHEDULE_TIMEOUT,
1385 to_rps_client(file));
Chris Wilson258a5ed2016-08-05 10:14:16 +01001386 if (ret)
1387 goto err;
1388
Chris Wilsonfe115622016-10-28 13:58:40 +01001389 ret = i915_gem_object_pin_pages(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001390 if (ret)
Chris Wilsonfe115622016-10-28 13:58:40 +01001391 goto err;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001392
Daniel Vetter935aaa62012-03-25 19:47:35 +02001393 ret = -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -07001394 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1395 * it would end up going through the fenced access, and we'll get
1396 * different detiling behavior between reading and writing.
1397 * pread/pwrite currently are reading and writing from the CPU
1398 * perspective, requiring manual detiling by the client.
1399 */
Chris Wilson6eae0052016-06-20 15:05:52 +01001400 if (!i915_gem_object_has_struct_page(obj) ||
Chris Wilson9c870d02016-10-24 13:42:15 +01001401 cpu_write_needs_clflush(obj))
Daniel Vetter935aaa62012-03-25 19:47:35 +02001402 /* Note that the gtt paths might fail with non-page-backed user
1403 * pointers (e.g. gtt mappings when moving data between
Chris Wilson9c870d02016-10-24 13:42:15 +01001404 * textures). Fall back to the shmem path in that case.
1405 */
Chris Wilsonfe115622016-10-28 13:58:40 +01001406 ret = i915_gem_gtt_pwrite_fast(obj, args);
Eric Anholt673a3942008-07-30 12:06:12 -07001407
Chris Wilsond1054ee2016-07-16 18:42:36 +01001408 if (ret == -EFAULT || ret == -ENOSPC) {
Chris Wilson6a2c4232014-11-04 04:51:40 -08001409 if (obj->phys_handle)
1410 ret = i915_gem_phys_pwrite(obj, args, file);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301411 else
Chris Wilsonfe115622016-10-28 13:58:40 +01001412 ret = i915_gem_shmem_pwrite(obj, args);
Chris Wilson6a2c4232014-11-04 04:51:40 -08001413 }
Daniel Vetter5c0480f2011-12-14 13:57:30 +01001414
Chris Wilsonfe115622016-10-28 13:58:40 +01001415 i915_gem_object_unpin_pages(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001416err:
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001417 i915_gem_object_put(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001418 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001419}
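/*
 * For illustration only, a minimal userspace sketch of driving this ioctl
 * (assuming the uapi definitions from i915_drm.h and an already-open DRM
 * fd; the handle would come from a prior GEM create call):
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int gem_pwrite(int fd, uint32_t handle, uint64_t offset,
 *			      const void *data, uint64_t size)
 *	{
 *		struct drm_i915_gem_pwrite arg;
 *
 *		memset(&arg, 0, sizeof(arg));
 *		arg.handle = handle;
 *		arg.offset = offset;
 *		arg.size = size;
 *		arg.data_ptr = (uintptr_t)data;
 *
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &arg);
 *	}
 *
 * On failure the ioctl returns -1 with errno set and, as noted above, the
 * contents of the destination range are then undefined.
 */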
1420
Chris Wilsond243ad82016-08-18 17:16:44 +01001421static inline enum fb_op_origin
Chris Wilsonaeecc962016-06-17 14:46:39 -03001422write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1423{
Chris Wilson50349242016-08-18 17:17:04 +01001424 return (domain == I915_GEM_DOMAIN_GTT ?
1425 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001426}
1427
Chris Wilson40e62d52016-10-28 13:58:41 +01001428static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1429{
1430 struct drm_i915_private *i915;
1431 struct list_head *list;
1432 struct i915_vma *vma;
1433
1434 list_for_each_entry(vma, &obj->vma_list, obj_link) {
1435 if (!i915_vma_is_ggtt(vma))
1436 continue;
1437
1438 if (i915_vma_is_active(vma))
1439 continue;
1440
1441 if (!drm_mm_node_allocated(&vma->node))
1442 continue;
1443
1444 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1445 }
1446
1447 i915 = to_i915(obj->base.dev);
1448 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
Joonas Lahtinen56cea322016-11-02 12:16:04 +02001449 list_move_tail(&obj->global_link, list);
Chris Wilson40e62d52016-10-28 13:58:41 +01001450}
1451
Eric Anholt673a3942008-07-30 12:06:12 -07001452/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001453 * Called when user space prepares to use an object with the CPU, either
1454 * through the mmap ioctl's mapping or a GTT mapping.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001455 * @dev: drm device
1456 * @data: ioctl data blob
1457 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001458 */
1459int
1460i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001461 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001462{
1463 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001464 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001465 uint32_t read_domains = args->read_domains;
1466 uint32_t write_domain = args->write_domain;
Chris Wilson40e62d52016-10-28 13:58:41 +01001467 int err;
Eric Anholt673a3942008-07-30 12:06:12 -07001468
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001469 /* Only handle setting domains to types used by the CPU. */
Chris Wilsonb8f90962016-08-05 10:14:07 +01001470 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001471 return -EINVAL;
1472
1473 /* Having something in the write domain implies it's in the read
1474 * domain, and only that read domain. Enforce that in the request.
1475 */
1476 if (write_domain != 0 && read_domains != write_domain)
1477 return -EINVAL;
1478
Chris Wilson03ac0642016-07-20 13:31:51 +01001479 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001480 if (!obj)
1481 return -ENOENT;
Jesse Barnes652c3932009-08-17 13:31:43 -07001482
Chris Wilson3236f572012-08-24 09:35:09 +01001483 /* Try to flush the object off the GPU without holding the lock.
1484 * We will repeat the flush holding the lock in the normal manner
1485 * to catch cases where we are gazumped.
1486 */
Chris Wilson40e62d52016-10-28 13:58:41 +01001487 err = i915_gem_object_wait(obj,
Chris Wilsone95433c2016-10-28 13:58:27 +01001488 I915_WAIT_INTERRUPTIBLE |
1489 (write_domain ? I915_WAIT_ALL : 0),
1490 MAX_SCHEDULE_TIMEOUT,
1491 to_rps_client(file));
Chris Wilson40e62d52016-10-28 13:58:41 +01001492 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001493 goto out;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001494
Chris Wilson40e62d52016-10-28 13:58:41 +01001495 /* Flush and acquire obj->pages so that we are coherent through
1496 * direct access in memory with previous cached writes through
1497 * shmemfs and that our cache domain tracking remains valid.
1498 * For example, if the obj->filp was moved to swap without us
1499 * being notified and releasing the pages, we would mistakenly
1500 * continue to assume that the obj remained out of the CPU cached
1501 * domain.
1502 */
1503 err = i915_gem_object_pin_pages(obj);
1504 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001505 goto out;
Chris Wilson40e62d52016-10-28 13:58:41 +01001506
1507 err = i915_mutex_lock_interruptible(dev);
1508 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001509 goto out_unpin;
Chris Wilson3236f572012-08-24 09:35:09 +01001510
Chris Wilson43566de2015-01-02 16:29:29 +05301511 if (read_domains & I915_GEM_DOMAIN_GTT)
Chris Wilson40e62d52016-10-28 13:58:41 +01001512 err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Chris Wilson43566de2015-01-02 16:29:29 +05301513 else
Chris Wilson40e62d52016-10-28 13:58:41 +01001514 err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1515
1516 /* And bump the LRU for this access */
1517 i915_gem_object_bump_inactive_ggtt(obj);
1518
1519 mutex_unlock(&dev->struct_mutex);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001520
Daniel Vetter031b6982015-06-26 19:35:16 +02001521 if (write_domain != 0)
Chris Wilsonaeecc962016-06-17 14:46:39 -03001522 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
Daniel Vetter031b6982015-06-26 19:35:16 +02001523
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001524out_unpin:
Chris Wilson40e62d52016-10-28 13:58:41 +01001525 i915_gem_object_unpin_pages(obj);
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001526out:
1527 i915_gem_object_put(obj);
Chris Wilson40e62d52016-10-28 13:58:41 +01001528 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07001529}
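/*
 * A hedged userspace sketch of the set-domain call handled above, e.g.
 * moving an object into the GTT domain before writing through a GTT mmap
 * (assumes the uapi definitions from i915_drm.h):
 *
 *	struct drm_i915_gem_set_domain arg = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
 *
 * Note the rules enforced above: GPU domains are rejected outright, and a
 * non-zero write_domain must match read_domains exactly.
 */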
1530
1531/**
1532 * Called when user space has done writes to this buffer
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001533 * @dev: drm device
1534 * @data: ioctl data blob
1535 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001536 */
1537int
1538i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001539 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001540{
1541 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001542 struct drm_i915_gem_object *obj;
Chris Wilsonc21724c2016-08-05 10:14:19 +01001543 int err = 0;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001544
Chris Wilson03ac0642016-07-20 13:31:51 +01001545 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonc21724c2016-08-05 10:14:19 +01001546 if (!obj)
1547 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001548
Eric Anholt673a3942008-07-30 12:06:12 -07001549 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilsonc21724c2016-08-05 10:14:19 +01001550 if (READ_ONCE(obj->pin_display)) {
1551 err = i915_mutex_lock_interruptible(dev);
1552 if (!err) {
1553 i915_gem_object_flush_cpu_write_domain(obj);
1554 mutex_unlock(&dev->struct_mutex);
1555 }
1556 }
Eric Anholte47c68e2008-11-14 13:35:19 -08001557
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001558 i915_gem_object_put(obj);
Chris Wilsonc21724c2016-08-05 10:14:19 +01001559 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07001560}
1561
1562/**
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001563 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1564 * it is mapped to.
1565 * @dev: drm device
1566 * @data: ioctl data blob
1567 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001568 *
1569 * While the mapping holds a reference on the contents of the object, it doesn't
1570 * imply a ref on the object itself.
Daniel Vetter34367382014-10-16 12:28:18 +02001571 *
1572 * IMPORTANT:
1573 *
1574 * DRM driver writers who look at this function as an example for how to do GEM
1575 * mmap support, please don't implement mmap support like here. The modern way
1576 * to implement DRM mmap support is with an mmap offset ioctl (like
1577 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1578 * That way debug tooling like valgrind will understand what's going on, hiding
1579 * the mmap call in a driver private ioctl will break that. The i915 driver only
1580 * does cpu mmaps this way because we didn't know better.
Eric Anholt673a3942008-07-30 12:06:12 -07001581 */
1582int
1583i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001584 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001585{
1586 struct drm_i915_gem_mmap *args = data;
Chris Wilson03ac0642016-07-20 13:31:51 +01001587 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001588 unsigned long addr;
1589
Akash Goel1816f922015-01-02 16:29:30 +05301590 if (args->flags & ~(I915_MMAP_WC))
1591 return -EINVAL;
1592
Borislav Petkov568a58e2016-03-29 17:42:01 +02001593 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
Akash Goel1816f922015-01-02 16:29:30 +05301594 return -ENODEV;
1595
Chris Wilson03ac0642016-07-20 13:31:51 +01001596 obj = i915_gem_object_lookup(file, args->handle);
1597 if (!obj)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001598 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001599
Daniel Vetter1286ff72012-05-10 15:25:09 +02001600 /* prime objects have no backing filp to GEM mmap
1601 * pages from.
1602 */
Chris Wilson03ac0642016-07-20 13:31:51 +01001603 if (!obj->base.filp) {
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001604 i915_gem_object_put(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001605 return -EINVAL;
1606 }
1607
Chris Wilson03ac0642016-07-20 13:31:51 +01001608 addr = vm_mmap(obj->base.filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001609 PROT_READ | PROT_WRITE, MAP_SHARED,
1610 args->offset);
Akash Goel1816f922015-01-02 16:29:30 +05301611 if (args->flags & I915_MMAP_WC) {
1612 struct mm_struct *mm = current->mm;
1613 struct vm_area_struct *vma;
1614
Michal Hocko80a89a52016-05-23 16:26:11 -07001615 if (down_write_killable(&mm->mmap_sem)) {
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001616 i915_gem_object_put(obj);
Michal Hocko80a89a52016-05-23 16:26:11 -07001617 return -EINTR;
1618 }
Akash Goel1816f922015-01-02 16:29:30 +05301619 vma = find_vma(mm, addr);
1620 if (vma)
1621 vma->vm_page_prot =
1622 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1623 else
1624 addr = -ENOMEM;
1625 up_write(&mm->mmap_sem);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001626
1627 /* This may race, but that's ok, it only gets set */
Chris Wilson50349242016-08-18 17:17:04 +01001628 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
Akash Goel1816f922015-01-02 16:29:30 +05301629 }
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001630 i915_gem_object_put(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001631 if (IS_ERR((void *)addr))
1632 return addr;
1633
1634 args->addr_ptr = (uint64_t) addr;
1635
1636 return 0;
1637}
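/*
 * An illustrative (not authoritative) sketch of the legacy CPU mmap ioctl
 * handled above, requesting a write-combined mapping (assumes i915_drm.h;
 * the WC flag is rejected with -ENODEV when the CPU lacks PAT, as above):
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = obj_size,
 *		.flags = I915_MMAP_WC,
 *	};
 *	void *ptr = NULL;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * As stressed in the comment above, new drivers should instead expose an
 * mmap offset and let userspace mmap() the DRM fd directly.
 */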
1638
Chris Wilson03af84f2016-08-18 17:17:01 +01001639static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1640{
1641 u64 size;
1642
1643 size = i915_gem_object_get_stride(obj);
1644 size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
1645
1646 return size >> PAGE_SHIFT;
1647}
1648
Jesse Barnesde151cf2008-11-12 10:03:55 -08001649/**
Chris Wilson4cc69072016-08-25 19:05:19 +01001650 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1651 *
1652 * A history of the GTT mmap interface:
1653 *
1654 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1655 * be aligned and suitable for fencing, and still fit into the available
1656 * mappable space left by the pinned display objects. A classic problem
1657 * we called the page-fault-of-doom where we would ping-pong between
1658 * two objects that could not fit inside the GTT and so the memcpy
1659 * would page one object in at the expense of the other between every
1660 * single byte.
1661 *
1662 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1663 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1664 * object is too large for the available space (or simply too large
1665 * for the mappable aperture!), a view is created instead and faulted
1666 * into userspace. (This view is aligned and sized appropriately for
1667 * fenced access.)
1668 *
1669 * Restrictions:
1670 *
1671 * * snoopable objects cannot be accessed via the GTT. It can cause machine
1672 * hangs on some architectures, corruption on others. An attempt to service
1673 * a GTT page fault from a snoopable object will generate a SIGBUS.
1674 *
1675 * * the object must be able to fit into RAM (physical memory, though not
1676 * limited to the mappable aperture).
1677 *
1678 *
1679 * Caveats:
1680 *
1681 * * a new GTT page fault will synchronize rendering from the GPU and flush
1682 * all data to system memory. Subsequent access will not be synchronized.
1683 *
1684 * * all mappings are revoked on runtime device suspend.
1685 *
1686 * * there are only 8, 16 or 32 fence registers to share between all users
1687 * (older machines require fence register for display and blitter access
1688 * as well). Contention of the fence registers will cause the previous users
1689 * to be unmapped and any new access will generate new page faults.
1690 *
1691 * * running out of memory while servicing a fault may generate a SIGBUS,
1692 * rather than the expected SIGSEGV.
1693 */
1694int i915_gem_mmap_gtt_version(void)
1695{
1696 return 1;
1697}
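/*
 * Userspace can discover the feature set documented above via
 * I915_PARAM_MMAP_GTT_VERSION; a rough sketch (assuming i915_drm.h), where
 * a missing parameter on an older kernel is treated as version 0, i.e. the
 * pre-partial-view rules apply:
 *
 *	int gtt_version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &gtt_version,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *		gtt_version = 0;
 */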
1698
1699/**
Jesse Barnesde151cf2008-11-12 10:03:55 -08001700 * i915_gem_fault - fault a page into the GTT
Chris Wilson058d88c2016-08-15 10:49:06 +01001701 * @area: CPU VMA in question
Geliang Tangd9072a32015-09-15 05:58:44 -07001702 * @vmf: fault info
Jesse Barnesde151cf2008-11-12 10:03:55 -08001703 *
1704 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1705 * from userspace. The fault handler takes care of binding the object to
1706 * the GTT (if needed), allocating and programming a fence register (again,
1707 * only if needed based on whether the old reg is still valid or the object
1708 * is tiled) and inserting a new PTE into the faulting process.
1709 *
1710 * Note that the faulting process may involve evicting existing objects
1711 * from the GTT and/or fence registers to make room. So performance may
1712 * suffer if the GTT working set is large or there are few fence registers
1713 * left.
Chris Wilson4cc69072016-08-25 19:05:19 +01001714 *
1715 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1716 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
Jesse Barnesde151cf2008-11-12 10:03:55 -08001717 */
Chris Wilson058d88c2016-08-15 10:49:06 +01001718int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001719{
Chris Wilson03af84f2016-08-18 17:17:01 +01001720#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
Chris Wilson058d88c2016-08-15 10:49:06 +01001721 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
Chris Wilson05394f32010-11-08 19:18:58 +00001722 struct drm_device *dev = obj->base.dev;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001723 struct drm_i915_private *dev_priv = to_i915(dev);
1724 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001725 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Chris Wilson058d88c2016-08-15 10:49:06 +01001726 struct i915_vma *vma;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001727 pgoff_t page_offset;
Chris Wilson82118872016-08-18 17:17:05 +01001728 unsigned int flags;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001729 int ret;
Paulo Zanonif65c9162013-11-27 18:20:34 -02001730
Jesse Barnesde151cf2008-11-12 10:03:55 -08001731 /* We don't use vmf->pgoff since that has the fake offset */
Chris Wilson058d88c2016-08-15 10:49:06 +01001732 page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
Jesse Barnesde151cf2008-11-12 10:03:55 -08001733 PAGE_SHIFT;
1734
Chris Wilsondb53a302011-02-03 11:57:46 +00001735 trace_i915_gem_object_fault(obj, page_offset, true, write);
1736
Chris Wilson6e4930f2014-02-07 18:37:06 -02001737 /* Try to flush the object off the GPU first without holding the lock.
Chris Wilsonb8f90962016-08-05 10:14:07 +01001738 * Upon acquiring the lock, we will perform our sanity checks and then
Chris Wilson6e4930f2014-02-07 18:37:06 -02001739 * repeat the flush holding the lock in the normal manner to catch cases
1740 * where we are gazumped.
1741 */
Chris Wilsone95433c2016-10-28 13:58:27 +01001742 ret = i915_gem_object_wait(obj,
1743 I915_WAIT_INTERRUPTIBLE,
1744 MAX_SCHEDULE_TIMEOUT,
1745 NULL);
Chris Wilson6e4930f2014-02-07 18:37:06 -02001746 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001747 goto err;
1748
Chris Wilson40e62d52016-10-28 13:58:41 +01001749 ret = i915_gem_object_pin_pages(obj);
1750 if (ret)
1751 goto err;
1752
Chris Wilsonb8f90962016-08-05 10:14:07 +01001753 intel_runtime_pm_get(dev_priv);
1754
1755 ret = i915_mutex_lock_interruptible(dev);
1756 if (ret)
1757 goto err_rpm;
Chris Wilson6e4930f2014-02-07 18:37:06 -02001758
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001759 /* Access to snoopable pages through the GTT is incoherent. */
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00001760 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
Chris Wilsonddeff6e2014-05-28 16:16:41 +01001761 ret = -EFAULT;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001762 goto err_unlock;
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001763 }
1764
Chris Wilson82118872016-08-18 17:17:05 +01001765 /* If the object is smaller than a couple of partial vmas, it is
1766 * not worth creating only a single partial vma - we may as well
1767 * clear enough space for the full object.
1768 */
1769 flags = PIN_MAPPABLE;
1770 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1771 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1772
Chris Wilsona61007a2016-08-18 17:17:02 +01001773 /* Now pin it into the GTT as needed */
Chris Wilson82118872016-08-18 17:17:05 +01001774 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
Chris Wilsona61007a2016-08-18 17:17:02 +01001775 if (IS_ERR(vma)) {
1776 struct i915_ggtt_view view;
Chris Wilson03af84f2016-08-18 17:17:01 +01001777 unsigned int chunk_size;
1778
Chris Wilsona61007a2016-08-18 17:17:02 +01001779 /* Use a partial view if it is bigger than available space */
Chris Wilson03af84f2016-08-18 17:17:01 +01001780 chunk_size = MIN_CHUNK_PAGES;
1781 if (i915_gem_object_is_tiled(obj))
Chris Wilson0ef723c2016-11-07 10:54:43 +00001782 chunk_size = roundup(chunk_size, tile_row_pages(obj));
Joonas Lahtinene7ded2d2015-05-08 14:37:39 +03001783
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03001784 memset(&view, 0, sizeof(view));
1785 view.type = I915_GGTT_VIEW_PARTIAL;
1786 view.params.partial.offset = rounddown(page_offset, chunk_size);
1787 view.params.partial.size =
Chris Wilsona61007a2016-08-18 17:17:02 +01001788 min_t(unsigned int, chunk_size,
Chris Wilson908b1232016-10-11 10:06:56 +01001789 vma_pages(area) - view.params.partial.offset);
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03001790
Chris Wilsonaa136d92016-08-18 17:17:03 +01001791 /* If the partial covers the entire object, just create a
1792 * normal VMA.
1793 */
1794 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1795 view.type = I915_GGTT_VIEW_NORMAL;
1796
Chris Wilson50349242016-08-18 17:17:04 +01001797 /* Userspace is now writing through an untracked VMA, abandon
1798 * all hope that the hardware is able to track future writes.
1799 */
1800 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1801
Chris Wilsona61007a2016-08-18 17:17:02 +01001802 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1803 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001804 if (IS_ERR(vma)) {
1805 ret = PTR_ERR(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001806 goto err_unlock;
Chris Wilson058d88c2016-08-15 10:49:06 +01001807 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001808
Chris Wilsonc9839302012-11-20 10:45:17 +00001809 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1810 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001811 goto err_unpin;
Chris Wilsonc9839302012-11-20 10:45:17 +00001812
Chris Wilson49ef5292016-08-18 17:17:00 +01001813 ret = i915_vma_get_fence(vma);
Chris Wilsonc9839302012-11-20 10:45:17 +00001814 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001815 goto err_unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001816
Chris Wilson275f0392016-10-24 13:42:14 +01001817 /* Mark as being mmapped into userspace for later revocation */
Chris Wilson9c870d02016-10-24 13:42:15 +01001818 assert_rpm_wakelock_held(dev_priv);
Chris Wilson275f0392016-10-24 13:42:14 +01001819 if (list_empty(&obj->userfault_link))
1820 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
Chris Wilson275f0392016-10-24 13:42:14 +01001821
Chris Wilsonb90b91d2014-06-10 12:14:40 +01001822 /* Finally, remap it using the new GTT offset */
Chris Wilsonc58305a2016-08-19 16:54:28 +01001823 ret = remap_io_mapping(area,
1824 area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1825 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1826 min_t(u64, vma->size, area->vm_end - area->vm_start),
1827 &ggtt->mappable);
Chris Wilsona61007a2016-08-18 17:17:02 +01001828
Chris Wilsonb8f90962016-08-05 10:14:07 +01001829err_unpin:
Chris Wilson058d88c2016-08-15 10:49:06 +01001830 __i915_vma_unpin(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001831err_unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001832 mutex_unlock(&dev->struct_mutex);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001833err_rpm:
1834 intel_runtime_pm_put(dev_priv);
Chris Wilson40e62d52016-10-28 13:58:41 +01001835 i915_gem_object_unpin_pages(obj);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001836err:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001837 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001838 case -EIO:
Daniel Vetter2232f032014-09-04 09:36:18 +02001839 /*
1840 * We eat errors when the gpu is terminally wedged to avoid
1841 * userspace unduly crashing (gl has no provisions for mmaps to
1842 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1843 * and so needs to be reported.
1844 */
1845 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
Paulo Zanonif65c9162013-11-27 18:20:34 -02001846 ret = VM_FAULT_SIGBUS;
1847 break;
1848 }
Chris Wilson045e7692010-11-07 09:18:22 +00001849 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001850 /*
1851 * EAGAIN means the gpu is hung and we'll wait for the error
1852 * handler to reset everything when re-faulting in
1853 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001854 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001855 case 0:
1856 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001857 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001858 case -EBUSY:
1859 /*
1860 * EBUSY is ok: this just means that another thread
1861 * already did the job.
1862 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001863 ret = VM_FAULT_NOPAGE;
1864 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001865 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001866 ret = VM_FAULT_OOM;
1867 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001868 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001869 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001870 ret = VM_FAULT_SIGBUS;
1871 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001872 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001873 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001874 ret = VM_FAULT_SIGBUS;
1875 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001876 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001877 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001878}
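/*
 * A short worked example of the partial-view sizing above, with assumed
 * numbers: a 64MiB untiled object faulting at byte offset 5MiB + 4KiB.
 *
 *	page_offset    = (5MiB + 4KiB) >> PAGE_SHIFT  = 1281 pages
 *	chunk_size     = MIN_CHUNK_PAGES              = 256 pages (1 MiB)
 *	partial.offset = rounddown(1281, 256)         = 1280 pages
 *	partial.size   = min(256, 16384 - 1280)       = 256 pages
 *
 * i.e. only the 1MiB-aligned chunk containing the fault is bound into the
 * mappable aperture, rather than the whole 64MiB object.
 */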
1879
1880/**
Chris Wilson901782b2009-07-10 08:18:50 +01001881 * i915_gem_release_mmap - remove physical page mappings
1882 * @obj: obj in question
1883 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001884 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001885 * relinquish ownership of the pages back to the system.
1886 *
1887 * It is vital that we remove the page mapping if we have mapped a tiled
1888 * object through the GTT and then lose the fence register due to
1889 * resource pressure. Similarly if the object has been moved out of the
1890 * aperture, then pages mapped into userspace must be revoked. Removing the
1891 * mapping will then trigger a page fault on the next user access, allowing
1892 * fixup by i915_gem_fault().
1893 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001894void
Chris Wilson05394f32010-11-08 19:18:58 +00001895i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001896{
Chris Wilson275f0392016-10-24 13:42:14 +01001897 struct drm_i915_private *i915 = to_i915(obj->base.dev);
Chris Wilson275f0392016-10-24 13:42:14 +01001898
Chris Wilson349f2cc2016-04-13 17:35:12 +01001899 /* Serialisation between user GTT access and our code depends upon
1900 * revoking the CPU's PTE whilst the mutex is held. The next user
1901 * pagefault then has to wait until we release the mutex.
Chris Wilson9c870d02016-10-24 13:42:15 +01001902 *
1903 * Note that RPM complicates somewhat by adding an additional
1904 * requirement that operations to the GGTT be made holding the RPM
1905 * wakeref.
Chris Wilson349f2cc2016-04-13 17:35:12 +01001906 */
Chris Wilson275f0392016-10-24 13:42:14 +01001907 lockdep_assert_held(&i915->drm.struct_mutex);
Chris Wilson9c870d02016-10-24 13:42:15 +01001908 intel_runtime_pm_get(i915);
Chris Wilson349f2cc2016-04-13 17:35:12 +01001909
Chris Wilson3594a3e2016-10-24 13:42:16 +01001910 if (list_empty(&obj->userfault_link))
Chris Wilson9c870d02016-10-24 13:42:15 +01001911 goto out;
Chris Wilson901782b2009-07-10 08:18:50 +01001912
Chris Wilson3594a3e2016-10-24 13:42:16 +01001913 list_del_init(&obj->userfault_link);
David Herrmann6796cb12014-01-03 14:24:19 +01001914 drm_vma_node_unmap(&obj->base.vma_node,
1915 obj->base.dev->anon_inode->i_mapping);
Chris Wilson349f2cc2016-04-13 17:35:12 +01001916
1917 /* Ensure that the CPU's PTE are revoked and there are not outstanding
1918 * memory transactions from userspace before we return. The TLB
1919 * flushing implied above by changing the PTE above *should* be
1920 * sufficient, an extra barrier here just provides us with a bit
1921 * of paranoid documentation about our requirement to serialise
1922 * memory writes before touching registers / GSM.
1923 */
1924 wmb();
Chris Wilson9c870d02016-10-24 13:42:15 +01001925
1926out:
1927 intel_runtime_pm_put(i915);
Chris Wilson901782b2009-07-10 08:18:50 +01001928}
1929
Chris Wilson7c108fd2016-10-24 13:42:18 +01001930void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
Chris Wilsoneedd10f2014-06-16 08:57:44 +01001931{
Chris Wilson3594a3e2016-10-24 13:42:16 +01001932 struct drm_i915_gem_object *obj, *on;
Chris Wilson7c108fd2016-10-24 13:42:18 +01001933 int i;
Chris Wilsoneedd10f2014-06-16 08:57:44 +01001934
Chris Wilson3594a3e2016-10-24 13:42:16 +01001935 /*
1936 * Only called during RPM suspend. All users of the userfault_list
1937 * must be holding an RPM wakeref to ensure that this can not
1938 * run concurrently with themselves (and use the struct_mutex for
1939 * protection between themselves).
1940 */
1941
1942 list_for_each_entry_safe(obj, on,
1943 &dev_priv->mm.userfault_list, userfault_link) {
Chris Wilson275f0392016-10-24 13:42:14 +01001944 list_del_init(&obj->userfault_link);
Chris Wilson275f0392016-10-24 13:42:14 +01001945 drm_vma_node_unmap(&obj->base.vma_node,
1946 obj->base.dev->anon_inode->i_mapping);
Chris Wilson275f0392016-10-24 13:42:14 +01001947 }
Chris Wilson7c108fd2016-10-24 13:42:18 +01001948
1949 /* The fence will be lost when the device powers down. If any were
1950 * in use by hardware (i.e. they are pinned), we should not be powering
1951 * down! All other fences will be reacquired by the user upon waking.
1952 */
1953 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1954 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1955
1956 if (WARN_ON(reg->pin_count))
1957 continue;
1958
1959 if (!reg->vma)
1960 continue;
1961
1962 GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
1963 reg->dirty = true;
1964 }
Chris Wilsoneedd10f2014-06-16 08:57:44 +01001965}
1966
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001967/**
1968 * i915_gem_get_ggtt_size - return required global GTT size for an object
Chris Wilsona9f14812016-08-04 16:32:28 +01001969 * @dev_priv: i915 device
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001970 * @size: object size
1971 * @tiling_mode: tiling mode
1972 *
1973 * Return the required global GTT size for an object, taking into account
1974 * potential fence register mapping.
1975 */
Chris Wilsona9f14812016-08-04 16:32:28 +01001976u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
1977 u64 size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001978{
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001979 u64 ggtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001980
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001981 GEM_BUG_ON(size == 0);
1982
Chris Wilsona9f14812016-08-04 16:32:28 +01001983 if (INTEL_GEN(dev_priv) >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001984 tiling_mode == I915_TILING_NONE)
1985 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001986
1987 /* Previous chips need a power-of-two fence region when tiling */
Chris Wilsona9f14812016-08-04 16:32:28 +01001988 if (IS_GEN3(dev_priv))
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001989 ggtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001990 else
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001991 ggtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001992
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001993 while (ggtt_size < size)
1994 ggtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001995
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001996 return ggtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001997}
1998
Jesse Barnesde151cf2008-11-12 10:03:55 -08001999/**
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002000 * i915_gem_get_ggtt_alignment - return required global GTT alignment
Chris Wilsona9f14812016-08-04 16:32:28 +01002001 * @dev_priv: i915 device
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002002 * @size: object size
2003 * @tiling_mode: tiling mode
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002004 * @fenced: is fenced alignment required or not
Jesse Barnesde151cf2008-11-12 10:03:55 -08002005 *
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002006 * Return the required global GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01002007 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002008 */
Chris Wilsona9f14812016-08-04 16:32:28 +01002009u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002010 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002011{
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002012 GEM_BUG_ON(size == 0);
2013
Jesse Barnesde151cf2008-11-12 10:03:55 -08002014 /*
2015 * Minimum alignment is 4k (GTT page size), but might be greater
2016 * if a fence register is needed for the object.
2017 */
Chris Wilsona9f14812016-08-04 16:32:28 +01002018 if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07002019 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002020 return 4096;
2021
2022 /*
2023 * Previous chips need to be aligned to the size of the smallest
2024 * fence register that can contain the object.
2025 */
Chris Wilsona9f14812016-08-04 16:32:28 +01002026 return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002027}
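/*
 * A hedged numeric example of the two helpers above: a 700KiB X-tiled
 * object on gen3 starts from the 1MiB minimum fence region, which already
 * covers 700KiB, so both its required GGTT size and its alignment come out
 * as 1MiB. The same object untiled, or on gen4+, needs only its own size
 * with 4KiB alignment, as those chips no longer fence by power-of-two
 * regions.
 */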
2028
Chris Wilsond8cb5082012-08-11 15:41:03 +01002029static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2030{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002031 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002032 int err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002033
Chris Wilsonf3f61842016-08-05 10:14:14 +01002034 err = drm_gem_create_mmap_offset(&obj->base);
2035 if (!err)
2036 return 0;
Daniel Vetterda494d72012-12-20 15:11:16 +01002037
Chris Wilsonf3f61842016-08-05 10:14:14 +01002038 /* We can idle the GPU locklessly to flush stale objects, but in order
2039 * to claim that space for ourselves, we need to take the big
2040 * struct_mutex to free the requests+objects and allocate our slot.
Chris Wilsond8cb5082012-08-11 15:41:03 +01002041 */
Chris Wilsonea746f32016-09-09 14:11:49 +01002042 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002043 if (err)
2044 return err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002045
Chris Wilsonf3f61842016-08-05 10:14:14 +01002046 err = i915_mutex_lock_interruptible(&dev_priv->drm);
2047 if (!err) {
2048 i915_gem_retire_requests(dev_priv);
2049 err = drm_gem_create_mmap_offset(&obj->base);
2050 mutex_unlock(&dev_priv->drm.struct_mutex);
2051 }
Daniel Vetterda494d72012-12-20 15:11:16 +01002052
Chris Wilsonf3f61842016-08-05 10:14:14 +01002053 return err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002054}
2055
2056static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2057{
Chris Wilsond8cb5082012-08-11 15:41:03 +01002058 drm_gem_free_mmap_offset(&obj->base);
2059}
2060
Dave Airlieda6b51d2014-12-24 13:11:17 +10002061int
Dave Airlieff72145b2011-02-07 12:16:14 +10002062i915_gem_mmap_gtt(struct drm_file *file,
2063 struct drm_device *dev,
Dave Airlieda6b51d2014-12-24 13:11:17 +10002064 uint32_t handle,
Dave Airlieff72145b2011-02-07 12:16:14 +10002065 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002066{
Chris Wilson05394f32010-11-08 19:18:58 +00002067 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002068 int ret;
2069
Chris Wilson03ac0642016-07-20 13:31:51 +01002070 obj = i915_gem_object_lookup(file, handle);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002071 if (!obj)
2072 return -ENOENT;
Chris Wilsonab182822009-09-22 18:46:17 +01002073
Chris Wilsond8cb5082012-08-11 15:41:03 +01002074 ret = i915_gem_object_create_mmap_offset(obj);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002075 if (ret == 0)
2076 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002077
Chris Wilsonf0cd5182016-10-28 13:58:43 +01002078 i915_gem_object_put(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002079 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002080}
2081
Dave Airlieff72145b2011-02-07 12:16:14 +10002082/**
2083 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2084 * @dev: DRM device
2085 * @data: GTT mapping ioctl data
2086 * @file: GEM object info
2087 *
2088 * Simply returns the fake offset to userspace so it can mmap it.
2089 * The mmap call will end up in drm_gem_mmap(), which will set things
2090 * up so we can get faults in the handler above.
2091 *
2092 * The fault handler will take care of binding the object into the GTT
2093 * (since it may have been evicted to make room for something), allocating
2094 * a fence register, and mapping the appropriate aperture address into
2095 * userspace.
2096 */
2097int
2098i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2099 struct drm_file *file)
2100{
2101 struct drm_i915_gem_mmap_gtt *args = data;
2102
Dave Airlieda6b51d2014-12-24 13:11:17 +10002103 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
Dave Airlieff72145b2011-02-07 12:16:14 +10002104}
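/*
 * For illustration, the two-step dance userspace performs against this
 * ioctl (a sketch assuming i915_drm.h and sys/mman.h; error handling
 * elided):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 *
 * The returned offset is only a token into the DRM fd's address space; the
 * actual binding and fencing happen lazily in i915_gem_fault().
 */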
2105
Daniel Vetter225067e2012-08-20 10:23:20 +02002106/* Immediately discard the backing storage */
2107static void
2108i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01002109{
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002110 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02002111
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002112 if (obj->base.filp == NULL)
2113 return;
2114
Daniel Vetter225067e2012-08-20 10:23:20 +02002115 /* Our goal here is to return as much of the memory as
2116 * is possible back to the system as we are called from OOM.
2117 * To do this we must instruct the shmfs to drop all of its
2118 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01002119 */
Chris Wilson55372522014-03-25 13:23:06 +00002120 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002121 obj->mm.madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01002122}
Chris Wilsone5281cc2010-10-28 13:45:36 +01002123
Chris Wilson55372522014-03-25 13:23:06 +00002124/* Try to discard unwanted pages */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002125void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
Daniel Vetter225067e2012-08-20 10:23:20 +02002126{
Chris Wilson55372522014-03-25 13:23:06 +00002127 struct address_space *mapping;
2128
Chris Wilson1233e2d2016-10-28 13:58:37 +01002129 lockdep_assert_held(&obj->mm.lock);
2130 GEM_BUG_ON(obj->mm.pages);
2131
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002132 switch (obj->mm.madv) {
Chris Wilson55372522014-03-25 13:23:06 +00002133 case I915_MADV_DONTNEED:
2134 i915_gem_object_truncate(obj);
2135 case __I915_MADV_PURGED:
2136 return;
2137 }
2138
2139 if (obj->base.filp == NULL)
2140 return;
2141
Al Viro93c76a32015-12-04 23:45:44 -05002142 mapping = obj->base.filp->f_mapping;
Chris Wilson55372522014-03-25 13:23:06 +00002143 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002144}
2145
Chris Wilson5cdf5882010-09-27 15:51:07 +01002146static void
Chris Wilson03ac84f2016-10-28 13:58:36 +01002147i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2148 struct sg_table *pages)
Eric Anholt673a3942008-07-30 12:06:12 -07002149{
Dave Gordon85d12252016-05-20 11:54:06 +01002150 struct sgt_iter sgt_iter;
2151 struct page *page;
Daniel Vetter1286ff72012-05-10 15:25:09 +02002152
Chris Wilson03ac84f2016-10-28 13:58:36 +01002153 __i915_gem_object_release_shmem(obj);
Eric Anholt856fa192009-03-19 14:10:50 -07002154
Chris Wilson03ac84f2016-10-28 13:58:36 +01002155 i915_gem_gtt_finish_pages(obj, pages);
Imre Deake2273302015-07-09 12:59:05 +03002156
Daniel Vetter6dacfd22011-09-12 21:30:02 +02002157 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002158 i915_gem_object_save_bit_17_swizzle(obj, pages);
Eric Anholt280b7132009-03-12 16:56:27 -07002159
Chris Wilson03ac84f2016-10-28 13:58:36 +01002160 for_each_sgt_page(page, sgt_iter, pages) {
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002161 if (obj->mm.dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01002162 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002163
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002164 if (obj->mm.madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01002165 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002166
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002167 put_page(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002168 }
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002169 obj->mm.dirty = false;
Eric Anholt673a3942008-07-30 12:06:12 -07002170
Chris Wilson03ac84f2016-10-28 13:58:36 +01002171 sg_free_table(pages);
2172 kfree(pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01002173}
2174
Chris Wilson96d77632016-10-28 13:58:33 +01002175static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2176{
2177 struct radix_tree_iter iter;
2178 void **slot;
2179
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002180 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2181 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
Chris Wilson96d77632016-10-28 13:58:33 +01002182}
2183
Chris Wilson548625e2016-11-01 12:11:34 +00002184void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2185 enum i915_mm_subclass subclass)
Chris Wilson37e680a2012-06-07 15:38:42 +01002186{
Chris Wilson03ac84f2016-10-28 13:58:36 +01002187 struct sg_table *pages;
Chris Wilson37e680a2012-06-07 15:38:42 +01002188
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002189 if (i915_gem_object_has_pinned_pages(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002190 return;
Chris Wilsona5570172012-09-04 21:02:54 +01002191
Chris Wilson15717de2016-08-04 07:52:26 +01002192 GEM_BUG_ON(obj->bind_count);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002193 if (!READ_ONCE(obj->mm.pages))
2194 return;
2195
2196 /* May be called by shrinker from within get_pages() (on another bo) */
Chris Wilson548625e2016-11-01 12:11:34 +00002197 mutex_lock_nested(&obj->mm.lock, subclass);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002198 if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2199 goto unlock;
Ben Widawsky3e123022013-07-31 17:00:04 -07002200
Chris Wilsona2165e32012-12-03 11:49:00 +00002201 /* ->put_pages might need to allocate memory for the bit17 swizzle
2202 * array, hence protect them from being reaped by removing them from gtt
2203 * lists early. */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002204 pages = fetch_and_zero(&obj->mm.pages);
2205 GEM_BUG_ON(!pages);
Chris Wilsona2165e32012-12-03 11:49:00 +00002206
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002207 if (obj->mm.mapping) {
Chris Wilson4b30cb22016-08-18 17:16:42 +01002208 void *ptr;
2209
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002210 ptr = ptr_mask_bits(obj->mm.mapping);
Chris Wilson4b30cb22016-08-18 17:16:42 +01002211 if (is_vmalloc_addr(ptr))
2212 vunmap(ptr);
Chris Wilsonfb8621d2016-04-08 12:11:14 +01002213 else
Chris Wilson4b30cb22016-08-18 17:16:42 +01002214 kunmap(kmap_to_page(ptr));
2215
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002216 obj->mm.mapping = NULL;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002217 }
2218
Chris Wilson96d77632016-10-28 13:58:33 +01002219 __i915_gem_object_reset_page_iter(obj);
2220
Chris Wilson03ac84f2016-10-28 13:58:36 +01002221 obj->ops->put_pages(obj, pages);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002222unlock:
2223 mutex_unlock(&obj->mm.lock);
Chris Wilson6c085a72012-08-20 11:40:46 +02002224}
2225
Chris Wilson4ff340f02016-10-18 13:02:50 +01002226static unsigned int swiotlb_max_size(void)
Chris Wilson871dfbd2016-10-11 09:20:21 +01002227{
2228#if IS_ENABLED(CONFIG_SWIOTLB)
2229 return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
2230#else
2231 return 0;
2232#endif
2233}
2234
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002235static void i915_sg_trim(struct sg_table *orig_st)
2236{
2237 struct sg_table new_st;
2238 struct scatterlist *sg, *new_sg;
2239 unsigned int i;
2240
2241 if (orig_st->nents == orig_st->orig_nents)
2242 return;
2243
2244 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
2245 return;
2246
2247 new_sg = new_st.sgl;
2248 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2249 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2250 /* called before being DMA mapped, no need to copy sg->dma_* */
2251 new_sg = sg_next(new_sg);
2252 }
2253
2254 sg_free_table(orig_st);
2255
2256 *orig_st = new_st;
2257}
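/*
 * A rough example of what the trim above saves: an 8MiB object allocates a
 * table sized for 2048 single-page entries, but if the shmem pages come
 * back largely contiguous and coalesce into, say, 16 segments, only 16
 * entries are live (nents) while 2048 stay allocated (orig_nents).
 * Rebuilding the table at nents entries releases the unused tail.
 */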
2258
Chris Wilson03ac84f2016-10-28 13:58:36 +01002259static struct sg_table *
Chris Wilson6c085a72012-08-20 11:40:46 +02002260i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002261{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002262 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002263 int page_count, i;
2264 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01002265 struct sg_table *st;
2266 struct scatterlist *sg;
Dave Gordon85d12252016-05-20 11:54:06 +01002267 struct sgt_iter sgt_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07002268 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002269 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson4ff340f02016-10-18 13:02:50 +01002270 unsigned int max_segment;
Imre Deake2273302015-07-09 12:59:05 +03002271 int ret;
Chris Wilson6c085a72012-08-20 11:40:46 +02002272 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07002273
Chris Wilson6c085a72012-08-20 11:40:46 +02002274 /* Assert that the object is not currently in any GPU domain. As it
2275 * wasn't in the GTT, there shouldn't be any way it could have been in
2276 * a GPU cache
2277 */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002278 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2279 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Chris Wilson6c085a72012-08-20 11:40:46 +02002280
Chris Wilson871dfbd2016-10-11 09:20:21 +01002281 max_segment = swiotlb_max_size();
2282 if (!max_segment)
Chris Wilson4ff340f02016-10-18 13:02:50 +01002283 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
Chris Wilson871dfbd2016-10-11 09:20:21 +01002284
Chris Wilson9da3da62012-06-01 15:20:22 +01002285 st = kmalloc(sizeof(*st), GFP_KERNEL);
2286 if (st == NULL)
Chris Wilson03ac84f2016-10-28 13:58:36 +01002287 return ERR_PTR(-ENOMEM);
Eric Anholt673a3942008-07-30 12:06:12 -07002288
Chris Wilson9da3da62012-06-01 15:20:22 +01002289 page_count = obj->base.size / PAGE_SIZE;
2290 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01002291 kfree(st);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002292 return ERR_PTR(-ENOMEM);
Chris Wilson9da3da62012-06-01 15:20:22 +01002293 }
2294
2295 /* Get the list of pages out of our struct file. They'll be pinned
2296 * at this point until we release them.
2297 *
2298 * Fail silently without starting the shrinker
2299 */
Al Viro93c76a32015-12-04 23:45:44 -05002300 mapping = obj->base.filp->f_mapping;
Michal Hockoc62d2552015-11-06 16:28:49 -08002301 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
Mel Gormand0164ad2015-11-06 16:28:21 -08002302 gfp |= __GFP_NORETRY | __GFP_NOWARN;
Imre Deak90797e62013-02-18 19:28:03 +02002303 sg = st->sgl;
2304 st->nents = 0;
2305 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02002306 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2307 if (IS_ERR(page)) {
Chris Wilson21ab4e72014-09-09 11:16:08 +01002308 i915_gem_shrink(dev_priv,
2309 page_count,
2310 I915_SHRINK_BOUND |
2311 I915_SHRINK_UNBOUND |
2312 I915_SHRINK_PURGEABLE);
Chris Wilson6c085a72012-08-20 11:40:46 +02002313 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2314 }
2315 if (IS_ERR(page)) {
2316 /* We've tried hard to allocate the memory by reaping
2317 * our own buffer, now let the real VM do its job and
2318 * go down in flames if truly OOM.
2319 */
David Herrmannf461d1b2014-05-25 14:34:10 +02002320 page = shmem_read_mapping_page(mapping, i);
Imre Deake2273302015-07-09 12:59:05 +03002321 if (IS_ERR(page)) {
2322 ret = PTR_ERR(page);
Chris Wilson6c085a72012-08-20 11:40:46 +02002323 goto err_pages;
Imre Deake2273302015-07-09 12:59:05 +03002324 }
Chris Wilson6c085a72012-08-20 11:40:46 +02002325 }
Chris Wilson871dfbd2016-10-11 09:20:21 +01002326 if (!i ||
2327 sg->length >= max_segment ||
2328 page_to_pfn(page) != last_pfn + 1) {
Imre Deak90797e62013-02-18 19:28:03 +02002329 if (i)
2330 sg = sg_next(sg);
2331 st->nents++;
2332 sg_set_page(sg, page, PAGE_SIZE, 0);
2333 } else {
2334 sg->length += PAGE_SIZE;
2335 }
2336 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03002337
2338 /* Check that the i965g/gm workaround works. */
2339 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07002340 }
Chris Wilson871dfbd2016-10-11 09:20:21 +01002341 if (sg) /* loop terminated early; short sg table */
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002342 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01002343
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002344 /* Trim unused sg entries to avoid wasting memory. */
2345 i915_sg_trim(st);
2346
Chris Wilson03ac84f2016-10-28 13:58:36 +01002347 ret = i915_gem_gtt_prepare_pages(obj, st);
Imre Deake2273302015-07-09 12:59:05 +03002348 if (ret)
2349 goto err_pages;
2350
Eric Anholt673a3942008-07-30 12:06:12 -07002351 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002352 i915_gem_object_do_bit_17_swizzle(obj, st);
Eric Anholt673a3942008-07-30 12:06:12 -07002353
Chris Wilson03ac84f2016-10-28 13:58:36 +01002354 return st;
Eric Anholt673a3942008-07-30 12:06:12 -07002355
2356err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02002357 sg_mark_end(sg);
Dave Gordon85d12252016-05-20 11:54:06 +01002358 for_each_sgt_page(page, sgt_iter, st)
2359 put_page(page);
Chris Wilson9da3da62012-06-01 15:20:22 +01002360 sg_free_table(st);
2361 kfree(st);
Chris Wilson0820baf2014-03-25 13:23:03 +00002362
2363 /* shmemfs first checks if there is enough memory to allocate the page
2364 * and reports ENOSPC if there is insufficient memory, along with the usual
2365 * ENOMEM for a genuine allocation failure.
2366 *
2367 * We use ENOSPC in our driver to mean that we have run out of aperture
2368 * space and so want to translate the error from shmemfs back to our
2369 * usual understanding of ENOMEM.
2370 */
Imre Deake2273302015-07-09 12:59:05 +03002371 if (ret == -ENOSPC)
2372 ret = -ENOMEM;
2373
Chris Wilson03ac84f2016-10-28 13:58:36 +01002374 return ERR_PTR(ret);
2375}
2376
2377void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2378 struct sg_table *pages)
2379{
Chris Wilson1233e2d2016-10-28 13:58:37 +01002380 lockdep_assert_held(&obj->mm.lock);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002381
2382 obj->mm.get_page.sg_pos = pages->sgl;
2383 obj->mm.get_page.sg_idx = 0;
2384
2385 obj->mm.pages = pages;
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002386
2387 if (i915_gem_object_is_tiled(obj) &&
2388 to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2389 GEM_BUG_ON(obj->mm.quirked);
2390 __i915_gem_object_pin_pages(obj);
2391 obj->mm.quirked = true;
2392 }
Chris Wilson03ac84f2016-10-28 13:58:36 +01002393}
2394
2395static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2396{
2397 struct sg_table *pages;
2398
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002399 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2400
Chris Wilson03ac84f2016-10-28 13:58:36 +01002401 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2402 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2403 return -EFAULT;
2404 }
2405
2406 pages = obj->ops->get_pages(obj);
2407 if (unlikely(IS_ERR(pages)))
2408 return PTR_ERR(pages);
2409
2410 __i915_gem_object_set_pages(obj, pages);
2411 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002412}
2413
Chris Wilson37e680a2012-06-07 15:38:42 +01002414/* Ensure that the associated pages are gathered from the backing storage
Chris Wilson1233e2d2016-10-28 13:58:37 +01002415 * and pinned into our object. i915_gem_object_pin_pages() may be called
Chris Wilson37e680a2012-06-07 15:38:42 +01002416 * multiple times before they are released by a single call to
Chris Wilson1233e2d2016-10-28 13:58:37 +01002417 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
Chris Wilson37e680a2012-06-07 15:38:42 +01002418 * either as a result of memory pressure (reaping pages under the shrinker)
2419 * or as the object is itself released.
2420 */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002421int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
Chris Wilson37e680a2012-06-07 15:38:42 +01002422{
Chris Wilson03ac84f2016-10-28 13:58:36 +01002423 int err;
Chris Wilson37e680a2012-06-07 15:38:42 +01002424
Chris Wilson1233e2d2016-10-28 13:58:37 +01002425 err = mutex_lock_interruptible(&obj->mm.lock);
2426 if (err)
2427 return err;
Chris Wilson4c7d62c2016-10-28 13:58:32 +01002428
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002429 if (unlikely(!obj->mm.pages)) {
2430 err = ____i915_gem_object_get_pages(obj);
2431 if (err)
2432 goto unlock;
2433
2434 smp_mb__before_atomic();
Chris Wilson1233e2d2016-10-28 13:58:37 +01002435 }
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002436 atomic_inc(&obj->mm.pages_pin_count);
Chris Wilson43e28f02013-01-08 10:53:09 +00002437
Chris Wilson1233e2d2016-10-28 13:58:37 +01002438unlock:
2439 mutex_unlock(&obj->mm.lock);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002440 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07002441}
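
/*
 * A minimal pin/unpin sketch (illustrative only; the surrounding caller is
 * hypothetical, the two helpers are the real API): keep the backing store
 * resident for the duration of a CPU access so the shrinker cannot reap it.
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... walk obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);
 */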
2442
Dave Gordondd6034c2016-05-20 11:54:04 +01002443/* The 'mapping' part of i915_gem_object_pin_map() below */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002444static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2445 enum i915_map_type type)
Dave Gordondd6034c2016-05-20 11:54:04 +01002446{
2447 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002448 struct sg_table *sgt = obj->mm.pages;
Dave Gordon85d12252016-05-20 11:54:06 +01002449 struct sgt_iter sgt_iter;
2450 struct page *page;
Dave Gordonb338fa42016-05-20 11:54:05 +01002451 struct page *stack_pages[32];
2452 struct page **pages = stack_pages;
Dave Gordondd6034c2016-05-20 11:54:04 +01002453 unsigned long i = 0;
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002454 pgprot_t pgprot;
Dave Gordondd6034c2016-05-20 11:54:04 +01002455 void *addr;
2456
2457 /* A single page can always be kmapped */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002458 if (n_pages == 1 && type == I915_MAP_WB)
Dave Gordondd6034c2016-05-20 11:54:04 +01002459 return kmap(sg_page(sgt->sgl));
2460
Dave Gordonb338fa42016-05-20 11:54:05 +01002461 if (n_pages > ARRAY_SIZE(stack_pages)) {
2462 /* Too big for stack -- allocate temporary array instead */
2463 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2464 if (!pages)
2465 return NULL;
2466 }
Dave Gordondd6034c2016-05-20 11:54:04 +01002467
Dave Gordon85d12252016-05-20 11:54:06 +01002468 for_each_sgt_page(page, sgt_iter, sgt)
2469 pages[i++] = page;
Dave Gordondd6034c2016-05-20 11:54:04 +01002470
2471 /* Check that we have the expected number of pages */
2472 GEM_BUG_ON(i != n_pages);
2473
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002474 switch (type) {
2475 case I915_MAP_WB:
2476 pgprot = PAGE_KERNEL;
2477 break;
2478 case I915_MAP_WC:
2479 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2480 break;
2481 }
2482 addr = vmap(pages, n_pages, 0, pgprot);
Dave Gordondd6034c2016-05-20 11:54:04 +01002483
Dave Gordonb338fa42016-05-20 11:54:05 +01002484 if (pages != stack_pages)
2485 drm_free_large(pages);
Dave Gordondd6034c2016-05-20 11:54:04 +01002486
2487 return addr;
2488}
2489
2490/* get, pin, and map the pages of the object into kernel space */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002491void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2492 enum i915_map_type type)
Chris Wilson0a798eb2016-04-08 12:11:11 +01002493{
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002494 enum i915_map_type has_type;
2495 bool pinned;
2496 void *ptr;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002497 int ret;
2498
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002499 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
Chris Wilson0a798eb2016-04-08 12:11:11 +01002500
Chris Wilson1233e2d2016-10-28 13:58:37 +01002501 ret = mutex_lock_interruptible(&obj->mm.lock);
Chris Wilson0a798eb2016-04-08 12:11:11 +01002502 if (ret)
2503 return ERR_PTR(ret);
2504
Chris Wilson1233e2d2016-10-28 13:58:37 +01002505 pinned = true;
2506 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002507 if (unlikely(!obj->mm.pages)) {
2508 ret = ____i915_gem_object_get_pages(obj);
2509 if (ret)
2510 goto err_unlock;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002511
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002512 smp_mb__before_atomic();
2513 }
2514 atomic_inc(&obj->mm.pages_pin_count);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002515 pinned = false;
2516 }
2517 GEM_BUG_ON(!obj->mm.pages);
Chris Wilson0a798eb2016-04-08 12:11:11 +01002518
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002519 ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002520 if (ptr && has_type != type) {
2521 if (pinned) {
2522 ret = -EBUSY;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002523 goto err_unpin;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002524 }
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002525
2526 if (is_vmalloc_addr(ptr))
2527 vunmap(ptr);
2528 else
2529 kunmap(kmap_to_page(ptr));
2530
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002531 ptr = obj->mm.mapping = NULL;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002532 }
2533
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002534 if (!ptr) {
2535 ptr = i915_gem_object_map(obj, type);
2536 if (!ptr) {
2537 ret = -ENOMEM;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002538 goto err_unpin;
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002539 }
2540
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002541 obj->mm.mapping = ptr_pack_bits(ptr, type);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002542 }
2543
Chris Wilson1233e2d2016-10-28 13:58:37 +01002544out_unlock:
2545 mutex_unlock(&obj->mm.lock);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002546 return ptr;
2547
Chris Wilson1233e2d2016-10-28 13:58:37 +01002548err_unpin:
2549 atomic_dec(&obj->mm.pages_pin_count);
2550err_unlock:
2551 ptr = ERR_PTR(ret);
2552 goto out_unlock;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002553}
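
/*
 * Usage sketch (illustrative only; the memcpy stands in for any CPU access,
 * and data/len are hypothetical): map the whole object into the kernel
 * address space, use the pointer, then drop the reference. The mapping is
 * cached on the object, so repeated calls with the same type are cheap.
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */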
2554
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002555static bool i915_context_is_banned(const struct i915_gem_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002556{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002557 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002558
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002559 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002560 return true;
2561
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002562 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
Chris Wilson676fa572014-12-24 08:13:39 -08002563 if (ctx->hang_stats.ban_period_seconds &&
2564 elapsed <= ctx->hang_stats.ban_period_seconds) {
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002565 DRM_DEBUG("context hanging too fast, banning!\n");
2566 return true;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002567 }
2568
2569 return false;
2570}
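
/*
 * Worked example of the ban heuristic above (numbers illustrative): with
 * ban_period_seconds == 6, a context found guilty of a hang less than six
 * seconds after its previous guilty hang is banned; guilty hangs spaced
 * further apart only accumulate in the context's hang statistics.
 */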
2571
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002572static void i915_set_reset_status(struct i915_gem_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002573 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002574{
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002575 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002576
2577 if (guilty) {
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002578 hs->banned = i915_context_is_banned(ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002579 hs->batch_active++;
2580 hs->guilty_ts = get_seconds();
2581 } else {
2582 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002583 }
2584}
2585
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002586struct drm_i915_gem_request *
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002587i915_gem_find_active_request(struct intel_engine_cs *engine)
Chris Wilson9375e442010-09-19 12:21:28 +01002588{
Chris Wilson4db080f2013-12-04 11:37:09 +00002589 struct drm_i915_gem_request *request;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002590
Chris Wilsonf69a02c2016-07-01 17:23:16 +01002591 /* We are called by the error capture and reset at a random
2592 * point in time. In particular, note that neither is crucially
2593 * ordered with an interrupt. After a hang, the GPU is dead and we
2594 * assume that no more writes can happen (we waited long enough for
2595 * all writes that were in transaction to be flushed) - adding an
2596 * extra delay for a recent interrupt is pointless. Hence, we do
2597 * not need an engine->irq_seqno_barrier() before the seqno reads.
2598 */
Chris Wilson73cb9702016-10-28 13:58:46 +01002599 list_for_each_entry(request, &engine->timeline->requests, link) {
Chris Wilson80b204b2016-10-28 13:58:58 +01002600 if (__i915_gem_request_completed(request))
Chris Wilson4db080f2013-12-04 11:37:09 +00002601 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002602
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002603 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002604 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002605
2606 return NULL;
2607}
2608
Chris Wilson821ed7d2016-09-09 14:11:53 +01002609static void reset_request(struct drm_i915_gem_request *request)
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002610{
Chris Wilson821ed7d2016-09-09 14:11:53 +01002611 void *vaddr = request->ring->vaddr;
2612 u32 head;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002613
Chris Wilson821ed7d2016-09-09 14:11:53 +01002614 /* As this request likely depends on state from the lost
2615 * context, clear out all the user operations leaving the
2616 * breadcrumb at the end (so we get the fence notifications).
2617 */
2618 head = request->head;
2619 if (request->postfix < head) {
2620 memset(vaddr + head, 0, request->ring->size - head);
2621 head = 0;
2622 }
2623 memset(vaddr + head, 0, request->postfix - head);
Chris Wilson4db080f2013-12-04 11:37:09 +00002624}
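
/*
 * Worked example of the wrap handling above (numbers illustrative): with a
 * 4096 byte ring, request->head == 3968 and request->postfix == 64, the
 * first memset() clears [3968, 4096) and resets head to 0, and the second
 * clears [0, 64), wiping everything the request emitted while preserving
 * the breadcrumb that follows the postfix.
 */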
2625
Chris Wilson821ed7d2016-09-09 14:11:53 +01002626static void i915_gem_reset_engine(struct intel_engine_cs *engine)
Chris Wilson4db080f2013-12-04 11:37:09 +00002627{
Chris Wilsondcff85c2016-08-05 10:14:11 +01002628 struct drm_i915_gem_request *request;
Chris Wilson821ed7d2016-09-09 14:11:53 +01002629 struct i915_gem_context *incomplete_ctx;
Chris Wilson80b204b2016-10-28 13:58:58 +01002630 struct intel_timeline *timeline;
Chris Wilson821ed7d2016-09-09 14:11:53 +01002631 bool ring_hung;
Chris Wilson608c1a52015-09-03 13:01:40 +01002632
Chris Wilson821ed7d2016-09-09 14:11:53 +01002633 if (engine->irq_seqno_barrier)
2634 engine->irq_seqno_barrier(engine);
2635
2636 request = i915_gem_find_active_request(engine);
2637 if (!request)
2638 return;
2639
2640 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
Chris Wilson77c60702016-10-04 21:11:29 +01002641 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
2642 ring_hung = false;
2643
Chris Wilson821ed7d2016-09-09 14:11:53 +01002644 i915_set_reset_status(request->ctx, ring_hung);
2645 if (!ring_hung)
2646 return;
2647
2648 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
Chris Wilson65e47602016-10-28 13:58:49 +01002649 engine->name, request->global_seqno);
Chris Wilson821ed7d2016-09-09 14:11:53 +01002650
2651	/* Set up the CS to resume from the breadcrumb of the hung request */
2652 engine->reset_hw(engine, request);
2653
2654 /* Users of the default context do not rely on logical state
2655 * preserved between batches. They have to emit full state on
2656 * every batch and so it is safe to execute queued requests following
2657 * the hang.
2658 *
2659 * Other contexts preserve state, now corrupt. We want to skip all
2660 * queued requests that reference the corrupt context.
2661 */
2662 incomplete_ctx = request->ctx;
2663 if (i915_gem_context_is_default(incomplete_ctx))
2664 return;
2665
Chris Wilson73cb9702016-10-28 13:58:46 +01002666 list_for_each_entry_continue(request, &engine->timeline->requests, link)
Chris Wilson821ed7d2016-09-09 14:11:53 +01002667 if (request->ctx == incomplete_ctx)
2668 reset_request(request);
Chris Wilson80b204b2016-10-28 13:58:58 +01002669
2670 timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
2671 list_for_each_entry(request, &timeline->requests, link)
2672 reset_request(request);
Chris Wilson821ed7d2016-09-09 14:11:53 +01002673}
2674
2675void i915_gem_reset(struct drm_i915_private *dev_priv)
2676{
2677 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302678 enum intel_engine_id id;
Chris Wilson821ed7d2016-09-09 14:11:53 +01002679
Chris Wilson4c7d62c2016-10-28 13:58:32 +01002680 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2681
Chris Wilson821ed7d2016-09-09 14:11:53 +01002682 i915_gem_retire_requests(dev_priv);
2683
Akash Goel3b3f1652016-10-13 22:44:48 +05302684 for_each_engine(engine, dev_priv, id)
Chris Wilson821ed7d2016-09-09 14:11:53 +01002685 i915_gem_reset_engine(engine);
2686
2687 i915_gem_restore_fences(&dev_priv->drm);
Chris Wilsonf2a91d12016-09-21 14:51:06 +01002688
2689 if (dev_priv->gt.awake) {
2690 intel_sanitize_gt_powersave(dev_priv);
2691 intel_enable_gt_powersave(dev_priv);
2692 if (INTEL_GEN(dev_priv) >= 6)
2693 gen6_rps_busy(dev_priv);
2694 }
Chris Wilson821ed7d2016-09-09 14:11:53 +01002695}
2696
2697static void nop_submit_request(struct drm_i915_gem_request *request)
2698{
2699}
2700
2701static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
2702{
2703 engine->submit_request = nop_submit_request;
Chris Wilson70c2a242016-09-09 14:11:46 +01002704
Chris Wilsonc4b09302016-07-20 09:21:10 +01002705 /* Mark all pending requests as complete so that any concurrent
2706 * (lockless) lookup doesn't try and wait upon the request as we
2707 * reset it.
2708 */
Chris Wilson73cb9702016-10-28 13:58:46 +01002709 intel_engine_init_global_seqno(engine,
Chris Wilsoncb399ea2016-11-01 10:03:16 +00002710 intel_engine_last_submit(engine));
Chris Wilsonc4b09302016-07-20 09:21:10 +01002711
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002712 /*
Oscar Mateodcb4c122014-11-13 10:28:10 +00002713	 * Clear the execlists queue before freeing the requests, as those
2714 * are the ones that keep the context and ringbuffer backing objects
2715 * pinned in place.
2716 */
Oscar Mateodcb4c122014-11-13 10:28:10 +00002717
Tomas Elf7de1691a2015-10-19 16:32:32 +01002718 if (i915.enable_execlists) {
Chris Wilson70c2a242016-09-09 14:11:46 +01002719 spin_lock(&engine->execlist_lock);
2720 INIT_LIST_HEAD(&engine->execlist_queue);
2721 i915_gem_request_put(engine->execlist_port[0].request);
2722 i915_gem_request_put(engine->execlist_port[1].request);
2723 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2724 spin_unlock(&engine->execlist_lock);
Oscar Mateodcb4c122014-11-13 10:28:10 +00002725 }
Eric Anholt673a3942008-07-30 12:06:12 -07002726}
2727
Chris Wilson821ed7d2016-09-09 14:11:53 +01002728void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
Eric Anholt673a3942008-07-30 12:06:12 -07002729{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002730 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302731 enum intel_engine_id id;
Eric Anholt673a3942008-07-30 12:06:12 -07002732
Chris Wilson821ed7d2016-09-09 14:11:53 +01002733 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2734 set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
Chris Wilson4db080f2013-12-04 11:37:09 +00002735
Chris Wilson821ed7d2016-09-09 14:11:53 +01002736 i915_gem_context_lost(dev_priv);
Akash Goel3b3f1652016-10-13 22:44:48 +05302737 for_each_engine(engine, dev_priv, id)
Chris Wilson821ed7d2016-09-09 14:11:53 +01002738 i915_gem_cleanup_engine(engine);
Chris Wilsonb913b332016-07-13 09:10:31 +01002739 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
Chris Wilsondfaae392010-09-22 10:31:52 +01002740
Chris Wilson821ed7d2016-09-09 14:11:53 +01002741 i915_gem_retire_requests(dev_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07002742}
2743
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002744static void
Eric Anholt673a3942008-07-30 12:06:12 -07002745i915_gem_retire_work_handler(struct work_struct *work)
2746{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002747 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01002748 container_of(work, typeof(*dev_priv), gt.retire_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01002749 struct drm_device *dev = &dev_priv->drm;
Eric Anholt673a3942008-07-30 12:06:12 -07002750
Chris Wilson891b48c2010-09-29 12:26:37 +01002751 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002752 if (mutex_trylock(&dev->struct_mutex)) {
Chris Wilson67d97da2016-07-04 08:08:31 +01002753 i915_gem_retire_requests(dev_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002754 mutex_unlock(&dev->struct_mutex);
2755 }
Chris Wilson67d97da2016-07-04 08:08:31 +01002756
2757 /* Keep the retire handler running until we are finally idle.
2758 * We do not need to do this test under locking as in the worst-case
2759 * we queue the retire worker once too often.
2760 */
Chris Wilsonc9615612016-07-09 10:12:06 +01002761 if (READ_ONCE(dev_priv->gt.awake)) {
2762 i915_queue_hangcheck(dev_priv);
Chris Wilson67d97da2016-07-04 08:08:31 +01002763 queue_delayed_work(dev_priv->wq,
2764 &dev_priv->gt.retire_work,
Chris Wilsonbcb45082012-10-05 17:02:57 +01002765 round_jiffies_up_relative(HZ));
Chris Wilsonc9615612016-07-09 10:12:06 +01002766 }
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002767}
Chris Wilson891b48c2010-09-29 12:26:37 +01002768
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002769static void
2770i915_gem_idle_work_handler(struct work_struct *work)
2771{
2772 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01002773 container_of(work, typeof(*dev_priv), gt.idle_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01002774 struct drm_device *dev = &dev_priv->drm;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002775 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302776 enum intel_engine_id id;
Chris Wilson67d97da2016-07-04 08:08:31 +01002777 bool rearm_hangcheck;
2778
2779 if (!READ_ONCE(dev_priv->gt.awake))
2780 return;
2781
Imre Deak0cb56702016-11-07 11:20:04 +02002782 /*
2783	 * Wait for the last execlists context to complete, but bail out in case a
2784 * new request is submitted.
2785 */
2786 wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
2787 intel_execlists_idle(dev_priv), 10);
2788
Chris Wilson28176ef2016-10-28 13:58:56 +01002789 if (READ_ONCE(dev_priv->gt.active_requests))
Chris Wilson67d97da2016-07-04 08:08:31 +01002790 return;
2791
2792 rearm_hangcheck =
2793 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2794
2795 if (!mutex_trylock(&dev->struct_mutex)) {
2796 /* Currently busy, come back later */
2797 mod_delayed_work(dev_priv->wq,
2798 &dev_priv->gt.idle_work,
2799 msecs_to_jiffies(50));
2800 goto out_rearm;
2801 }
2802
Imre Deak93c97dc2016-11-07 11:20:03 +02002803 /*
2804 * New request retired after this work handler started, extend active
2805 * period until next instance of the work.
2806 */
2807 if (work_pending(work))
2808 goto out_unlock;
2809
Chris Wilson28176ef2016-10-28 13:58:56 +01002810 if (dev_priv->gt.active_requests)
Chris Wilson67d97da2016-07-04 08:08:31 +01002811 goto out_unlock;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002812
Imre Deak0cb56702016-11-07 11:20:04 +02002813 if (wait_for(intel_execlists_idle(dev_priv), 10))
2814 DRM_ERROR("Timeout waiting for engines to idle\n");
2815
Akash Goel3b3f1652016-10-13 22:44:48 +05302816 for_each_engine(engine, dev_priv, id)
Chris Wilson67d97da2016-07-04 08:08:31 +01002817 i915_gem_batch_pool_fini(&engine->batch_pool);
Zou Nan hai852835f2010-05-21 09:08:56 +08002818
Chris Wilson67d97da2016-07-04 08:08:31 +01002819 GEM_BUG_ON(!dev_priv->gt.awake);
2820 dev_priv->gt.awake = false;
2821 rearm_hangcheck = false;
Daniel Vetter30ecad72015-12-09 09:29:36 +01002822
Chris Wilson67d97da2016-07-04 08:08:31 +01002823 if (INTEL_GEN(dev_priv) >= 6)
2824 gen6_rps_idle(dev_priv);
2825 intel_runtime_pm_put(dev_priv);
2826out_unlock:
2827 mutex_unlock(&dev->struct_mutex);
Chris Wilson35c94182015-04-07 16:20:37 +01002828
Chris Wilson67d97da2016-07-04 08:08:31 +01002829out_rearm:
2830 if (rearm_hangcheck) {
2831 GEM_BUG_ON(!dev_priv->gt.awake);
2832 i915_queue_hangcheck(dev_priv);
Chris Wilson35c94182015-04-07 16:20:37 +01002833 }
Eric Anholt673a3942008-07-30 12:06:12 -07002834}
2835
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002836void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2837{
2838 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2839 struct drm_i915_file_private *fpriv = file->driver_priv;
2840 struct i915_vma *vma, *vn;
2841
2842 mutex_lock(&obj->base.dev->struct_mutex);
2843 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2844 if (vma->vm->file == fpriv)
2845 i915_vma_close(vma);
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01002846
2847 if (i915_gem_object_is_active(obj) &&
2848 !i915_gem_object_has_active_reference(obj)) {
2849 i915_gem_object_set_active_reference(obj);
2850 i915_gem_object_get(obj);
2851 }
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002852 mutex_unlock(&obj->base.dev->struct_mutex);
2853}
2854
Chris Wilsone95433c2016-10-28 13:58:27 +01002855static unsigned long to_wait_timeout(s64 timeout_ns)
2856{
2857 if (timeout_ns < 0)
2858 return MAX_SCHEDULE_TIMEOUT;
2859
2860 if (timeout_ns == 0)
2861 return 0;
2862
2863 return nsecs_to_jiffies_timeout(timeout_ns);
2864}
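
/*
 * Worked examples for the conversion above (illustrative): timeout_ns == -1
 * becomes MAX_SCHEDULE_TIMEOUT (wait indefinitely), timeout_ns == 0 becomes
 * 0 (poll without sleeping), and a positive value such as 20000000 (20 ms)
 * is converted to jiffies and rounded up so the wait is never shorter than
 * requested.
 */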
2865
Ben Widawsky5816d642012-04-11 11:18:19 -07002866/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002867 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002868 * @dev: drm device pointer
2869 * @data: ioctl data blob
2870 * @file: drm file pointer
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002871 *
2872 * Returns 0 if successful, else an error is returned with the remaining time in
2873 * the timeout parameter.
2874 * -ETIME: object is still busy after timeout
2875 * -ERESTARTSYS: signal interrupted the wait
2876	 * -ENOENT: object doesn't exist
2877 * Also possible, but rare:
2878 * -EAGAIN: GPU wedged
2879 * -ENOMEM: damn
2880 * -ENODEV: Internal IRQ fail
2881 * -E?: The add request failed
2882 *
2883 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2884 * non-zero timeout parameter the wait ioctl will wait for the given number of
2885 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2886 * without holding struct_mutex the object may become re-busied before this
2887	 * function completes. A similar but shorter race condition exists in the busy
2888	 * ioctl.
2889 */
2890int
2891i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2892{
2893 struct drm_i915_gem_wait *args = data;
2894 struct drm_i915_gem_object *obj;
Chris Wilsone95433c2016-10-28 13:58:27 +01002895 ktime_t start;
2896 long ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002897
Daniel Vetter11b5d512014-09-29 15:31:26 +02002898 if (args->flags != 0)
2899 return -EINVAL;
2900
Chris Wilson03ac0642016-07-20 13:31:51 +01002901 obj = i915_gem_object_lookup(file, args->bo_handle);
Chris Wilson033d5492016-08-05 10:14:17 +01002902 if (!obj)
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002903 return -ENOENT;
Chris Wilson033d5492016-08-05 10:14:17 +01002904
Chris Wilsone95433c2016-10-28 13:58:27 +01002905 start = ktime_get();
2906
2907 ret = i915_gem_object_wait(obj,
2908 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
2909 to_wait_timeout(args->timeout_ns),
2910 to_rps_client(file));
2911
2912 if (args->timeout_ns > 0) {
2913 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
2914 if (args->timeout_ns < 0)
2915 args->timeout_ns = 0;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002916 }
2917
Chris Wilsonf0cd5182016-10-28 13:58:43 +01002918 i915_gem_object_put(obj);
John Harrisonff865882014-11-24 18:49:28 +00002919 return ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002920}
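
/*
 * Userspace usage sketch (illustrative; uses libdrm's drmIoctl() wrapper and
 * elides error handling): wait up to 100 ms for a buffer to go idle, with
 * any unconsumed time written back into timeout_ns.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 100 * 1000 * 1000,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */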
2921
Chris Wilson73cb9702016-10-28 13:58:46 +01002922static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002923{
Chris Wilson73cb9702016-10-28 13:58:46 +01002924 int ret, i;
2925
2926 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
2927 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
2928 if (ret)
2929 return ret;
2930 }
2931
2932 return 0;
2933}
2934
2935int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
2936{
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002937 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002938
Chris Wilson9caa34a2016-11-11 14:58:08 +00002939 if (flags & I915_WAIT_LOCKED) {
2940 struct i915_gem_timeline *tl;
2941
2942 lockdep_assert_held(&i915->drm.struct_mutex);
2943
2944 list_for_each_entry(tl, &i915->gt.timelines, link) {
2945 ret = wait_for_timeline(tl, flags);
2946 if (ret)
2947 return ret;
2948 }
2949 } else {
2950 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002951 if (ret)
2952 return ret;
2953 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002954
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002955 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002956}
2957
Chris Wilsond0da48c2016-11-06 12:59:59 +00002958void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
2959 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07002960{
Eric Anholt673a3942008-07-30 12:06:12 -07002961 /* If we don't have a page list set up, then we're not pinned
2962 * to GPU, and we can ignore the cache flush because it'll happen
2963 * again at bind time.
2964 */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002965 if (!obj->mm.pages)
Chris Wilsond0da48c2016-11-06 12:59:59 +00002966 return;
Eric Anholt673a3942008-07-30 12:06:12 -07002967
Imre Deak769ce462013-02-13 21:56:05 +02002968 /*
2969 * Stolen memory is always coherent with the GPU as it is explicitly
2970 * marked as wc by the system, or the system is cache-coherent.
2971 */
Chris Wilson6a2c4232014-11-04 04:51:40 -08002972 if (obj->stolen || obj->phys_handle)
Chris Wilsond0da48c2016-11-06 12:59:59 +00002973 return;
Imre Deak769ce462013-02-13 21:56:05 +02002974
Chris Wilson9c23f7f2011-03-29 16:59:52 -07002975 /* If the GPU is snooping the contents of the CPU cache,
2976 * we do not need to manually clear the CPU cache lines. However,
2977 * the caches are only snooped when the render cache is
2978 * flushed/invalidated. As we always have to emit invalidations
2979 * and flushes when moving into and out of the RENDER domain, correct
2980 * snooping behaviour occurs naturally as the result of our domain
2981 * tracking.
2982 */
Chris Wilson0f719792015-01-13 13:32:52 +00002983 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
2984 obj->cache_dirty = true;
Chris Wilsond0da48c2016-11-06 12:59:59 +00002985 return;
Chris Wilson0f719792015-01-13 13:32:52 +00002986 }
Chris Wilson9c23f7f2011-03-29 16:59:52 -07002987
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002988 trace_i915_gem_object_clflush(obj);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002989 drm_clflush_sg(obj->mm.pages);
Chris Wilson0f719792015-01-13 13:32:52 +00002990 obj->cache_dirty = false;
Eric Anholte47c68e2008-11-14 13:35:19 -08002991}
2992
2993/** Flushes the GTT write domain for the object if it's dirty. */
2994static void
Chris Wilson05394f32010-11-08 19:18:58 +00002995i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002996{
Chris Wilson3b5724d2016-08-18 17:16:49 +01002997 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002998
Chris Wilson05394f32010-11-08 19:18:58 +00002999 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003000 return;
3001
Chris Wilson63256ec2011-01-04 18:42:07 +00003002 /* No actual flushing is required for the GTT write domain. Writes
Chris Wilson3b5724d2016-08-18 17:16:49 +01003003 * to it "immediately" go to main memory as far as we know, so there's
Eric Anholte47c68e2008-11-14 13:35:19 -08003004 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003005 *
3006 * However, we do have to enforce the order so that all writes through
3007 * the GTT land before any writes to the device, such as updates to
3008 * the GATT itself.
Chris Wilson3b5724d2016-08-18 17:16:49 +01003009 *
3010 * We also have to wait a bit for the writes to land from the GTT.
3011 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3012 * timing. This issue has only been observed when switching quickly
3013 * between GTT writes and CPU reads from inside the kernel on recent hw,
3014 * and it appears to only affect discrete GTT blocks (i.e. on LLC
3015 * system agents we cannot reproduce this behaviour).
Eric Anholte47c68e2008-11-14 13:35:19 -08003016 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003017 wmb();
Chris Wilson3b5724d2016-08-18 17:16:49 +01003018 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
Akash Goel3b3f1652016-10-13 22:44:48 +05303019 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
Chris Wilson63256ec2011-01-04 18:42:07 +00003020
Chris Wilsond243ad82016-08-18 17:16:44 +01003021 intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
Daniel Vetterf99d7062014-06-19 16:01:59 +02003022
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003023 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003024 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003025 obj->base.read_domains,
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003026 I915_GEM_DOMAIN_GTT);
Eric Anholte47c68e2008-11-14 13:35:19 -08003027}
3028
3029/** Flushes the CPU write domain for the object if it's dirty. */
3030static void
Daniel Vettere62b59e2015-01-21 14:53:48 +01003031i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003032{
Chris Wilson05394f32010-11-08 19:18:58 +00003033 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003034 return;
3035
Chris Wilsond0da48c2016-11-06 12:59:59 +00003036 i915_gem_clflush_object(obj, obj->pin_display);
Rodrigo Vivide152b62015-07-07 16:28:51 -07003037 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Daniel Vetterf99d7062014-06-19 16:01:59 +02003038
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003039 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003040 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003041 obj->base.read_domains,
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003042 I915_GEM_DOMAIN_CPU);
Eric Anholte47c68e2008-11-14 13:35:19 -08003043}
3044
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003045/**
3046 * Moves a single object to the GTT read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003047 * @obj: object to act on
3048 * @write: ask for write access or read only
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003049 *
3050 * This function returns when the move is complete, including waiting on
3051 * flushes to occur.
3052 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003053int
Chris Wilson20217462010-11-23 15:26:33 +00003054i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003055{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003056 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003057 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003058
Chris Wilsone95433c2016-10-28 13:58:27 +01003059 lockdep_assert_held(&obj->base.dev->struct_mutex);
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003060
Chris Wilsone95433c2016-10-28 13:58:27 +01003061 ret = i915_gem_object_wait(obj,
3062 I915_WAIT_INTERRUPTIBLE |
3063 I915_WAIT_LOCKED |
3064 (write ? I915_WAIT_ALL : 0),
3065 MAX_SCHEDULE_TIMEOUT,
3066 NULL);
Chris Wilson88241782011-01-07 17:09:48 +00003067 if (ret)
3068 return ret;
3069
Chris Wilsonc13d87e2016-07-20 09:21:15 +01003070 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3071 return 0;
3072
Chris Wilson43566de2015-01-02 16:29:29 +05303073 /* Flush and acquire obj->pages so that we are coherent through
3074 * direct access in memory with previous cached writes through
3075 * shmemfs and that our cache domain tracking remains valid.
3076 * For example, if the obj->filp was moved to swap without us
3077 * being notified and releasing the pages, we would mistakenly
3078 * continue to assume that the obj remained out of the CPU cached
3079 * domain.
3080 */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003081 ret = i915_gem_object_pin_pages(obj);
Chris Wilson43566de2015-01-02 16:29:29 +05303082 if (ret)
3083 return ret;
3084
Daniel Vettere62b59e2015-01-21 14:53:48 +01003085 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003086
Chris Wilsond0a57782012-10-09 19:24:37 +01003087 /* Serialise direct access to this object with the barriers for
3088 * coherent writes from the GPU, by effectively invalidating the
3089 * GTT domain upon first access.
3090 */
3091 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3092 mb();
3093
Chris Wilson05394f32010-11-08 19:18:58 +00003094 old_write_domain = obj->base.write_domain;
3095 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003096
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003097 /* It should now be out of any other write domains, and we can update
3098 * the domain values for our changes.
3099 */
Chris Wilson40e62d52016-10-28 13:58:41 +01003100 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilson05394f32010-11-08 19:18:58 +00003101 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003102 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003103 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3104 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003105 obj->mm.dirty = true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003106 }
3107
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003108 trace_i915_gem_object_change_domain(obj,
3109 old_read_domains,
3110 old_write_domain);
3111
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003112 i915_gem_object_unpin_pages(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003113 return 0;
3114}
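
/*
 * Usage sketch (illustrative): with struct_mutex held, a caller about to
 * write through a GTT mapping first moves the object into the GTT domain so
 * that the CPU caches are flushed and the pending write is tracked.
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	... write through the GTT mapping ...
 */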
3115
Chris Wilsonef55f922015-10-09 14:11:27 +01003116/**
3117 * Changes the cache-level of an object across all VMA.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003118 * @obj: object to act on
3119 * @cache_level: new cache level to set for the object
Chris Wilsonef55f922015-10-09 14:11:27 +01003120 *
3121 * After this function returns, the object will be in the new cache-level
3122 * across all GTT and the contents of the backing storage will be coherent,
3123 * with respect to the new cache-level. In order to keep the backing storage
3124 * coherent for all users, we only allow a single cache level to be set
3125 * globally on the object and prevent it from being changed whilst the
3126 * hardware is reading from the object. That is if the object is currently
3127 * on the scanout it will be set to uncached (or equivalent display
3128 * cache coherency) and all non-MOCS GPU access will also be uncached so
3129 * that all direct access to the scanout remains coherent.
3130 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003131int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3132 enum i915_cache_level cache_level)
3133{
Chris Wilsonaa653a62016-08-04 07:52:27 +01003134 struct i915_vma *vma;
Ville Syrjäläed75a552015-08-11 19:47:10 +03003135 int ret = 0;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003136
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003137 lockdep_assert_held(&obj->base.dev->struct_mutex);
3138
Chris Wilsone4ffd172011-04-04 09:44:39 +01003139 if (obj->cache_level == cache_level)
Ville Syrjäläed75a552015-08-11 19:47:10 +03003140 goto out;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003141
Chris Wilsonef55f922015-10-09 14:11:27 +01003142 /* Inspect the list of currently bound VMA and unbind any that would
3143 * be invalid given the new cache-level. This is principally to
3144 * catch the issue of the CS prefetch crossing page boundaries and
3145 * reading an invalid PTE on older architectures.
3146 */
Chris Wilsonaa653a62016-08-04 07:52:27 +01003147restart:
3148 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003149 if (!drm_mm_node_allocated(&vma->node))
3150 continue;
3151
Chris Wilson20dfbde2016-08-04 16:32:30 +01003152 if (i915_vma_is_pinned(vma)) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003153 DRM_DEBUG("can not change the cache level of pinned objects\n");
3154 return -EBUSY;
3155 }
3156
Chris Wilsonaa653a62016-08-04 07:52:27 +01003157 if (i915_gem_valid_gtt_space(vma, cache_level))
3158 continue;
3159
3160 ret = i915_vma_unbind(vma);
3161 if (ret)
3162 return ret;
3163
3164 /* As unbinding may affect other elements in the
3165 * obj->vma_list (due to side-effects from retiring
3166 * an active vma), play safe and restart the iterator.
3167 */
3168 goto restart;
Chris Wilson42d6ab42012-07-26 11:49:32 +01003169 }
3170
Chris Wilsonef55f922015-10-09 14:11:27 +01003171 /* We can reuse the existing drm_mm nodes but need to change the
3172 * cache-level on the PTE. We could simply unbind them all and
3173 * rebind with the correct cache-level on next use. However since
3174	 * we already have a valid slot, dma mapping, pages etc, we may as well
3175 * rewrite the PTE in the belief that doing so tramples upon less
3176 * state and so involves less work.
3177 */
Chris Wilson15717de2016-08-04 07:52:26 +01003178 if (obj->bind_count) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003179 /* Before we change the PTE, the GPU must not be accessing it.
3180 * If we wait upon the object, we know that all the bound
3181 * VMA are no longer active.
3182 */
Chris Wilsone95433c2016-10-28 13:58:27 +01003183 ret = i915_gem_object_wait(obj,
3184 I915_WAIT_INTERRUPTIBLE |
3185 I915_WAIT_LOCKED |
3186 I915_WAIT_ALL,
3187 MAX_SCHEDULE_TIMEOUT,
3188 NULL);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003189 if (ret)
3190 return ret;
3191
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00003192 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3193 cache_level != I915_CACHE_NONE) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003194 /* Access to snoopable pages through the GTT is
3195 * incoherent and on some machines causes a hard
3196	 * lockup. Relinquish the CPU mmapping to force
3197 * userspace to refault in the pages and we can
3198 * then double check if the GTT mapping is still
3199 * valid for that pointer access.
3200 */
3201 i915_gem_release_mmap(obj);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003202
Chris Wilsonef55f922015-10-09 14:11:27 +01003203 /* As we no longer need a fence for GTT access,
3204 * we can relinquish it now (and so prevent having
3205 * to steal a fence from someone else on the next
3206 * fence request). Note GPU activity would have
3207 * dropped the fence as all snoopable access is
3208 * supposed to be linear.
3209 */
Chris Wilson49ef5292016-08-18 17:17:00 +01003210 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3211 ret = i915_vma_put_fence(vma);
3212 if (ret)
3213 return ret;
3214 }
Chris Wilsonef55f922015-10-09 14:11:27 +01003215 } else {
3216 /* We either have incoherent backing store and
3217 * so no GTT access or the architecture is fully
3218 * coherent. In such cases, existing GTT mmaps
3219 * ignore the cache bit in the PTE and we can
3220 * rewrite it without confusing the GPU or having
3221 * to force userspace to fault back in its mmaps.
3222 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003223 }
3224
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003225 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003226 if (!drm_mm_node_allocated(&vma->node))
3227 continue;
3228
3229 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3230 if (ret)
3231 return ret;
3232 }
Chris Wilsone4ffd172011-04-04 09:44:39 +01003233 }
3234
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003235 list_for_each_entry(vma, &obj->vma_list, obj_link)
Chris Wilson2c225692013-08-09 12:26:45 +01003236 vma->node.color = cache_level;
3237 obj->cache_level = cache_level;
3238
Ville Syrjäläed75a552015-08-11 19:47:10 +03003239out:
Chris Wilsonef55f922015-10-09 14:11:27 +01003240 /* Flush the dirty CPU caches to the backing storage so that the
3241 * object is now coherent at its new cache level (with respect
3242 * to the access domain).
3243 */
Chris Wilsond0da48c2016-11-06 12:59:59 +00003244 if (obj->cache_dirty && cpu_write_needs_clflush(obj))
3245 i915_gem_clflush_object(obj, true);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003246
Chris Wilsone4ffd172011-04-04 09:44:39 +01003247 return 0;
3248}
3249
Ben Widawsky199adf42012-09-21 17:01:20 -07003250int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3251 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003252{
Ben Widawsky199adf42012-09-21 17:01:20 -07003253 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003254 struct drm_i915_gem_object *obj;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003255 int err = 0;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003256
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003257 rcu_read_lock();
3258 obj = i915_gem_object_lookup_rcu(file, args->handle);
3259 if (!obj) {
3260 err = -ENOENT;
3261 goto out;
3262 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003263
Chris Wilson651d7942013-08-08 14:41:10 +01003264 switch (obj->cache_level) {
3265 case I915_CACHE_LLC:
3266 case I915_CACHE_L3_LLC:
3267 args->caching = I915_CACHING_CACHED;
3268 break;
3269
Chris Wilson4257d3b2013-08-08 14:41:11 +01003270 case I915_CACHE_WT:
3271 args->caching = I915_CACHING_DISPLAY;
3272 break;
3273
Chris Wilson651d7942013-08-08 14:41:10 +01003274 default:
3275 args->caching = I915_CACHING_NONE;
3276 break;
3277 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003278out:
3279 rcu_read_unlock();
3280 return err;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003281}
3282
Ben Widawsky199adf42012-09-21 17:01:20 -07003283int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3284 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003285{
Chris Wilson9c870d02016-10-24 13:42:15 +01003286 struct drm_i915_private *i915 = to_i915(dev);
Ben Widawsky199adf42012-09-21 17:01:20 -07003287 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003288 struct drm_i915_gem_object *obj;
3289 enum i915_cache_level level;
3290 int ret;
3291
Ben Widawsky199adf42012-09-21 17:01:20 -07003292 switch (args->caching) {
3293 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003294 level = I915_CACHE_NONE;
3295 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003296 case I915_CACHING_CACHED:
Imre Deake5756c12015-08-14 18:43:30 +03003297 /*
3298 * Due to a HW issue on BXT A stepping, GPU stores via a
3299 * snooped mapping may leave stale data in a corresponding CPU
3300 * cacheline, whereas normally such cachelines would get
3301 * invalidated.
3302 */
Chris Wilson9c870d02016-10-24 13:42:15 +01003303 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
Imre Deake5756c12015-08-14 18:43:30 +03003304 return -ENODEV;
3305
Chris Wilsone6994ae2012-07-10 10:27:08 +01003306 level = I915_CACHE_LLC;
3307 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003308 case I915_CACHING_DISPLAY:
Chris Wilson9c870d02016-10-24 13:42:15 +01003309 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003310 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003311 default:
3312 return -EINVAL;
3313 }
3314
Ben Widawsky3bc29132012-09-26 16:15:20 -07003315 ret = i915_mutex_lock_interruptible(dev);
3316 if (ret)
Chris Wilson9c870d02016-10-24 13:42:15 +01003317 return ret;
Ben Widawsky3bc29132012-09-26 16:15:20 -07003318
Chris Wilson03ac0642016-07-20 13:31:51 +01003319 obj = i915_gem_object_lookup(file, args->handle);
3320 if (!obj) {
Chris Wilsone6994ae2012-07-10 10:27:08 +01003321 ret = -ENOENT;
3322 goto unlock;
3323 }
3324
3325 ret = i915_gem_object_set_cache_level(obj, level);
Chris Wilsonf8c417c2016-07-20 13:31:53 +01003326 i915_gem_object_put(obj);
Chris Wilsone6994ae2012-07-10 10:27:08 +01003327unlock:
3328 mutex_unlock(&dev->struct_mutex);
3329 return ret;
3330}
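
/*
 * Userspace usage sketch (illustrative; uses libdrm's drmIoctl() wrapper):
 * request snooped (LLC) caching for a buffer that will mostly be accessed
 * by the CPU.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */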
3331
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003332/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003333 * Prepare buffer for display plane (scanout, cursors, etc).
3334 * Can be called from an uninterruptible phase (modesetting) and allows
3335 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003336 */
Chris Wilson058d88c2016-08-15 10:49:06 +01003337struct i915_vma *
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003338i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3339 u32 alignment,
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003340 const struct i915_ggtt_view *view)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003341{
Chris Wilson058d88c2016-08-15 10:49:06 +01003342 struct i915_vma *vma;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003343 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003344 int ret;
3345
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003346 lockdep_assert_held(&obj->base.dev->struct_mutex);
3347
Chris Wilsoncc98b412013-08-09 12:25:09 +01003348 /* Mark the pin_display early so that we account for the
3349 * display coherency whilst setting up the cache domains.
3350 */
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003351 obj->pin_display++;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003352
Eric Anholta7ef0642011-03-29 16:59:54 -07003353 /* The display engine is not coherent with the LLC cache on gen6. As
3354 * a result, we make sure that the pinning that is about to occur is
3355 * done with uncached PTEs. This is lowest common denominator for all
3356 * chipsets.
3357 *
3358 * However for gen6+, we could do better by using the GFDT bit instead
3359 * of uncaching, which would allow us to flush all the LLC-cached data
3360 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3361 */
Chris Wilson651d7942013-08-08 14:41:10 +01003362 ret = i915_gem_object_set_cache_level(obj,
Tvrtko Ursulin86527442016-10-13 11:03:00 +01003363 HAS_WT(to_i915(obj->base.dev)) ?
3364 I915_CACHE_WT : I915_CACHE_NONE);
Chris Wilson058d88c2016-08-15 10:49:06 +01003365 if (ret) {
3366 vma = ERR_PTR(ret);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003367 goto err_unpin_display;
Chris Wilson058d88c2016-08-15 10:49:06 +01003368 }
Eric Anholta7ef0642011-03-29 16:59:54 -07003369
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003370 /* As the user may map the buffer once pinned in the display plane
3371 * (e.g. libkms for the bootup splash), we have to ensure that we
Chris Wilson2efb8132016-08-18 17:17:06 +01003372 * always use map_and_fenceable for all scanout buffers. However,
3373 * it may simply be too big to fit into mappable, in which case
3374 * put it anyway and hope that userspace can cope (but always first
3375 * try to preserve the existing ABI).
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003376 */
Chris Wilson2efb8132016-08-18 17:17:06 +01003377 vma = ERR_PTR(-ENOSPC);
3378 if (view->type == I915_GGTT_VIEW_NORMAL)
3379 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3380 PIN_MAPPABLE | PIN_NONBLOCK);
Chris Wilson767a2222016-11-07 11:01:28 +00003381 if (IS_ERR(vma)) {
3382 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3383 unsigned int flags;
3384
3385 /* Valleyview is definitely limited to scanning out the first
3386	 * 512MiB. Let's presume this behaviour was inherited from the
3387 * g4x display engine and that all earlier gen are similarly
3388 * limited. Testing suggests that it is a little more
3389 * complicated than this. For example, Cherryview appears quite
3390 * happy to scanout from anywhere within its global aperture.
3391 */
3392 flags = 0;
3393 if (HAS_GMCH_DISPLAY(i915))
3394 flags = PIN_MAPPABLE;
3395 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3396 }
Chris Wilson058d88c2016-08-15 10:49:06 +01003397 if (IS_ERR(vma))
Chris Wilsoncc98b412013-08-09 12:25:09 +01003398 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003399
Chris Wilsond8923dc2016-08-18 17:17:07 +01003400 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3401
Daniel Vettere62b59e2015-01-21 14:53:48 +01003402 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003403
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003404 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003405 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003406
3407 /* It should now be out of any other write domains, and we can update
3408 * the domain values for our changes.
3409 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003410 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003411 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003412
3413 trace_i915_gem_object_change_domain(obj,
3414 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003415 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003416
Chris Wilson058d88c2016-08-15 10:49:06 +01003417 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003418
3419err_unpin_display:
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003420 obj->pin_display--;
Chris Wilson058d88c2016-08-15 10:49:06 +01003421 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003422}
3423
3424void
Chris Wilson058d88c2016-08-15 10:49:06 +01003425i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003426{
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003427 lockdep_assert_held(&vma->vm->dev->struct_mutex);
3428
Chris Wilson058d88c2016-08-15 10:49:06 +01003429 if (WARN_ON(vma->obj->pin_display == 0))
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003430 return;
3431
Chris Wilsond8923dc2016-08-18 17:17:07 +01003432 if (--vma->obj->pin_display == 0)
3433 vma->display_alignment = 0;
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003434
Chris Wilson383d5822016-08-18 17:17:08 +01003435 /* Bump the LRU to try and avoid premature eviction whilst flipping */
3436 if (!i915_vma_is_active(vma))
3437 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3438
Chris Wilson058d88c2016-08-15 10:49:06 +01003439 i915_vma_unpin(vma);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003440}
3441
Eric Anholte47c68e2008-11-14 13:35:19 -08003442/**
3443 * Moves a single object to the CPU read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003444 * @obj: object to act on
3445 * @write: requesting write or read-only access
Eric Anholte47c68e2008-11-14 13:35:19 -08003446 *
3447 * This function returns when the move is complete, including waiting on
3448 * flushes to occur.
3449 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003450int
Chris Wilson919926a2010-11-12 13:42:53 +00003451i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003452{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003453 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003454 int ret;
3455
Chris Wilsone95433c2016-10-28 13:58:27 +01003456 lockdep_assert_held(&obj->base.dev->struct_mutex);
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003457
Chris Wilsone95433c2016-10-28 13:58:27 +01003458 ret = i915_gem_object_wait(obj,
3459 I915_WAIT_INTERRUPTIBLE |
3460 I915_WAIT_LOCKED |
3461 (write ? I915_WAIT_ALL : 0),
3462 MAX_SCHEDULE_TIMEOUT,
3463 NULL);
Chris Wilson88241782011-01-07 17:09:48 +00003464 if (ret)
3465 return ret;
3466
Chris Wilsonc13d87e2016-07-20 09:21:15 +01003467 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3468 return 0;
3469
Eric Anholte47c68e2008-11-14 13:35:19 -08003470 i915_gem_object_flush_gtt_write_domain(obj);
3471
Chris Wilson05394f32010-11-08 19:18:58 +00003472 old_write_domain = obj->base.write_domain;
3473 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003474
Eric Anholte47c68e2008-11-14 13:35:19 -08003475 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003476 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003477 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003478
Chris Wilson05394f32010-11-08 19:18:58 +00003479 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003480 }
3481
3482 /* It should now be out of any other write domains, and we can update
3483 * the domain values for our changes.
3484 */
Chris Wilson40e62d52016-10-28 13:58:41 +01003485 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003486
3487 /* If we're writing through the CPU, then the GPU read domains will
3488 * need to be invalidated at next use.
3489 */
3490 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003491 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3492 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003493 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003494
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003495 trace_i915_gem_object_change_domain(obj,
3496 old_read_domains,
3497 old_write_domain);
3498
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003499 return 0;
3500}
3501
Eric Anholt673a3942008-07-30 12:06:12 -07003502/* Throttle our rendering by waiting until the ring has completed our requests
3503 * emitted over 20 msec ago.
3504 *
Eric Anholtb9624422009-06-03 07:27:35 +00003505 * Note that if we were to use the current jiffies each time around the loop,
3506 * we wouldn't escape the function with any frames outstanding if the time to
3507 * render a frame was over 20ms.
3508 *
Eric Anholt673a3942008-07-30 12:06:12 -07003509 * This should get us reasonable parallelism between CPU and GPU but also
3510 * relatively low latency when blocking on a particular request to finish.
3511 */
3512static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003513i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003514{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003515 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003516 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsond0bc54f2015-05-21 21:01:48 +01003517 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
John Harrison54fb2412014-11-24 18:49:27 +00003518 struct drm_i915_gem_request *request, *target = NULL;
Chris Wilsone95433c2016-10-28 13:58:27 +01003519 long ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003520
Chris Wilsonf4457ae2016-04-13 17:35:08 +01003521 /* ABI: return -EIO if already wedged */
3522 if (i915_terminally_wedged(&dev_priv->gpu_error))
3523 return -EIO;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003524
Chris Wilson1c255952010-09-26 11:03:27 +01003525 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003526 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003527 if (time_after_eq(request->emitted_jiffies, recent_enough))
3528 break;
3529
John Harrisonfcfa423c2015-05-29 17:44:12 +01003530 /*
3531 * Note that the request might not have been submitted yet.
3532 * In which case emitted_jiffies will be zero.
3533 */
3534 if (!request->emitted_jiffies)
3535 continue;
3536
John Harrison54fb2412014-11-24 18:49:27 +00003537 target = request;
Eric Anholtb9624422009-06-03 07:27:35 +00003538 }
John Harrisonff865882014-11-24 18:49:28 +00003539 if (target)
Chris Wilsone8a261e2016-07-20 13:31:49 +01003540 i915_gem_request_get(target);
Chris Wilson1c255952010-09-26 11:03:27 +01003541 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003542
John Harrison54fb2412014-11-24 18:49:27 +00003543 if (target == NULL)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003544 return 0;
3545
Chris Wilsone95433c2016-10-28 13:58:27 +01003546 ret = i915_wait_request(target,
3547 I915_WAIT_INTERRUPTIBLE,
3548 MAX_SCHEDULE_TIMEOUT);
Chris Wilsone8a261e2016-07-20 13:31:49 +01003549 i915_gem_request_put(target);
John Harrisonff865882014-11-24 18:49:28 +00003550
Chris Wilsone95433c2016-10-28 13:58:27 +01003551 return ret < 0 ? ret : 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003552}
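
/*
 * Illustrative userspace-side sketch (not part of this file): the throttle
 * window above is DRM_I915_THROTTLE_JIFFIES, the jiffies equivalent of the
 * ~20ms mentioned in the comment, defined outside this file (in i915_drv.h).
 * A client that wants to self-throttle simply issues the ioctl, which takes
 * no argument (-EIO back means the GPU is terminally wedged):
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 *
 * The drmIoctl() wrapper from libdrm is an assumption of this sketch.
 */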
3553
Chris Wilson058d88c2016-08-15 10:49:06 +01003554struct i915_vma *
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003555i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3556 const struct i915_ggtt_view *view,
Chris Wilson91b2db62016-08-04 16:32:23 +01003557 u64 size,
Chris Wilson2ffffd02016-08-04 16:32:22 +01003558 u64 alignment,
3559 u64 flags)
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003560{
Chris Wilsonad16d2e2016-10-13 09:55:04 +01003561 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3562 struct i915_address_space *vm = &dev_priv->ggtt.base;
Chris Wilson59bfa122016-08-04 16:32:31 +01003563 struct i915_vma *vma;
3564 int ret;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003565
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003566 lockdep_assert_held(&obj->base.dev->struct_mutex);
3567
Chris Wilson058d88c2016-08-15 10:49:06 +01003568 vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
Chris Wilson59bfa122016-08-04 16:32:31 +01003569 if (IS_ERR(vma))
Chris Wilson058d88c2016-08-15 10:49:06 +01003570 return vma;
Chris Wilson59bfa122016-08-04 16:32:31 +01003571
3572 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3573 if (flags & PIN_NONBLOCK &&
3574 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
Chris Wilson058d88c2016-08-15 10:49:06 +01003575 return ERR_PTR(-ENOSPC);
Chris Wilson59bfa122016-08-04 16:32:31 +01003576
Chris Wilsonad16d2e2016-10-13 09:55:04 +01003577 if (flags & PIN_MAPPABLE) {
3578 u32 fence_size;
3579
3580 fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
3581 i915_gem_object_get_tiling(obj));
3582 /* If the required space is larger than the available
3583	 * aperture, we will not be able to find a slot for the
3584 * object and unbinding the object now will be in
3585 * vain. Worse, doing so may cause us to ping-pong
3586 * the object in and out of the Global GTT and
3587 * waste a lot of cycles under the mutex.
3588 */
3589 if (fence_size > dev_priv->ggtt.mappable_end)
3590 return ERR_PTR(-E2BIG);
3591
3592 /* If NONBLOCK is set the caller is optimistically
3593 * trying to cache the full object within the mappable
3594 * aperture, and *must* have a fallback in place for
3595 * situations where we cannot bind the object. We
3596 * can be a little more lax here and use the fallback
3597 * more often to avoid costly migrations of ourselves
3598 * and other objects within the aperture.
3599 *
3600 * Half-the-aperture is used as a simple heuristic.
3601	 * More interesting would be to search for a free
3602 * block prior to making the commitment to unbind.
3603 * That caters for the self-harm case, and with a
3604 * little more heuristics (e.g. NOFAULT, NOEVICT)
3605 * we could try to minimise harm to others.
3606 */
3607 if (flags & PIN_NONBLOCK &&
3608 fence_size > dev_priv->ggtt.mappable_end / 2)
3609 return ERR_PTR(-ENOSPC);
3610 }
3611
Chris Wilson59bfa122016-08-04 16:32:31 +01003612 WARN(i915_vma_is_pinned(vma),
3613 "bo is already pinned in ggtt with incorrect alignment:"
Chris Wilson05a20d02016-08-18 17:16:55 +01003614 " offset=%08x, req.alignment=%llx,"
3615 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3616 i915_ggtt_offset(vma), alignment,
Chris Wilson59bfa122016-08-04 16:32:31 +01003617 !!(flags & PIN_MAPPABLE),
Chris Wilson05a20d02016-08-18 17:16:55 +01003618 i915_vma_is_map_and_fenceable(vma));
Chris Wilson59bfa122016-08-04 16:32:31 +01003619 ret = i915_vma_unbind(vma);
3620 if (ret)
Chris Wilson058d88c2016-08-15 10:49:06 +01003621 return ERR_PTR(ret);
Chris Wilson59bfa122016-08-04 16:32:31 +01003622 }
3623
Chris Wilson058d88c2016-08-15 10:49:06 +01003624 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3625 if (ret)
3626 return ERR_PTR(ret);
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003627
Chris Wilson058d88c2016-08-15 10:49:06 +01003628 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003629}
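
/*
 * Worked example of the half-the-aperture heuristic above (numbers purely
 * illustrative): with a 256MiB mappable aperture, a PIN_MAPPABLE |
 * PIN_NONBLOCK request whose fenced size exceeds 128MiB is refused with
 * -ENOSPC instead of evicting neighbours, so the caller is expected to fall
 * back to a non-mappable path; anything whose fenced size exceeds the full
 * 256MiB aperture can never fit and is rejected outright with -E2BIG.
 */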
3630
Chris Wilsonedf6b762016-08-09 09:23:33 +01003631static __always_inline unsigned int __busy_read_flag(unsigned int id)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003632{
3633 /* Note that we could alias engines in the execbuf API, but
3634 * that would be very unwise as it prevents userspace from
3635	 * exercising fine control over engine selection. Ahem.
3636 *
3637 * This should be something like EXEC_MAX_ENGINE instead of
3638 * I915_NUM_ENGINES.
3639 */
3640 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3641 return 0x10000 << id;
3642}
3643
3644static __always_inline unsigned int __busy_write_id(unsigned int id)
3645{
Chris Wilson70cb4722016-08-09 18:08:25 +01003646 /* The uABI guarantees an active writer is also amongst the read
3647 * engines. This would be true if we accessed the activity tracking
3648 * under the lock, but as we perform the lookup of the object and
3649	 * its activity locklessly we cannot guarantee that the last_write
3650 * being active implies that we have set the same engine flag from
3651 * last_read - hence we always set both read and write busy for
3652 * last_write.
3653 */
3654 return id | __busy_read_flag(id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003655}
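
/*
 * Resulting uABI encoding, shown as an illustrative decode (the helpers
 * below are hypothetical, not part of the driver): the low 16 bits of
 * args->busy carry the engine->exec_id of the single writer (0 when there
 * is none), the high 16 bits carry one read bit per engine.
 */
static inline unsigned int __maybe_unused example_busy_writer(u32 busy)
{
	return busy & 0xffff;	/* exec_id of the writing engine, if any */
}

static inline unsigned int __maybe_unused example_busy_readers(u32 busy)
{
	return busy >> 16;	/* bitmask of engines with outstanding reads */
}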
3656
Chris Wilsonedf6b762016-08-09 09:23:33 +01003657static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01003658__busy_set_if_active(const struct dma_fence *fence,
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003659 unsigned int (*flag)(unsigned int id))
3660{
Chris Wilsond07f0e52016-10-28 13:58:44 +01003661 struct drm_i915_gem_request *rq;
Chris Wilson12555012016-08-16 09:50:40 +01003662
Chris Wilsond07f0e52016-10-28 13:58:44 +01003663 /* We have to check the current hw status of the fence as the uABI
3664 * guarantees forward progress. We could rely on the idle worker
3665 * to eventually flush us, but to minimise latency just ask the
3666 * hardware.
3667 *
3668 * Note we only report on the status of native fences.
3669 */
3670 if (!dma_fence_is_i915(fence))
Chris Wilson12555012016-08-16 09:50:40 +01003671 return 0;
3672
Chris Wilsond07f0e52016-10-28 13:58:44 +01003673 /* opencode to_request() in order to avoid const warnings */
3674 rq = container_of(fence, struct drm_i915_gem_request, fence);
3675 if (i915_gem_request_completed(rq))
3676 return 0;
3677
3678 return flag(rq->engine->exec_id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003679}
3680
Chris Wilsonedf6b762016-08-09 09:23:33 +01003681static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01003682busy_check_reader(const struct dma_fence *fence)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003683{
Chris Wilsond07f0e52016-10-28 13:58:44 +01003684 return __busy_set_if_active(fence, __busy_read_flag);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003685}
3686
Chris Wilsonedf6b762016-08-09 09:23:33 +01003687static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01003688busy_check_writer(const struct dma_fence *fence)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003689{
Chris Wilsond07f0e52016-10-28 13:58:44 +01003690 if (!fence)
3691 return 0;
3692
3693 return __busy_set_if_active(fence, __busy_write_id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003694}
3695
Eric Anholt673a3942008-07-30 12:06:12 -07003696int
Eric Anholt673a3942008-07-30 12:06:12 -07003697i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003698 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003699{
3700 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003701 struct drm_i915_gem_object *obj;
Chris Wilsond07f0e52016-10-28 13:58:44 +01003702 struct reservation_object_list *list;
3703 unsigned int seq;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003704 int err;
Eric Anholt673a3942008-07-30 12:06:12 -07003705
Chris Wilsond07f0e52016-10-28 13:58:44 +01003706 err = -ENOENT;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003707 rcu_read_lock();
3708 obj = i915_gem_object_lookup_rcu(file, args->handle);
Chris Wilsond07f0e52016-10-28 13:58:44 +01003709 if (!obj)
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003710 goto out;
Chris Wilsond07f0e52016-10-28 13:58:44 +01003711
3712 /* A discrepancy here is that we do not report the status of
3713 * non-i915 fences, i.e. even though we may report the object as idle,
3714 * a call to set-domain may still stall waiting for foreign rendering.
3715 * This also means that wait-ioctl may report an object as busy,
3716 * where busy-ioctl considers it idle.
3717 *
3718 * We trade the ability to warn of foreign fences to report on which
3719 * i915 engines are active for the object.
3720 *
3721 * Alternatively, we can trade that extra information on read/write
3722 * activity with
3723 * args->busy =
3724 * !reservation_object_test_signaled_rcu(obj->resv, true);
3725 * to report the overall busyness. This is what the wait-ioctl does.
3726 *
3727 */
3728retry:
3729 seq = raw_read_seqcount(&obj->resv->seq);
3730
3731 /* Translate the exclusive fence to the READ *and* WRITE engine */
3732 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
3733
3734 /* Translate shared fences to READ set of engines */
3735 list = rcu_dereference(obj->resv->fence);
3736 if (list) {
3737 unsigned int shared_count = list->shared_count, i;
3738
3739 for (i = 0; i < shared_count; ++i) {
3740 struct dma_fence *fence =
3741 rcu_dereference(list->shared[i]);
3742
3743 args->busy |= busy_check_reader(fence);
3744 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003745 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003746
Chris Wilsond07f0e52016-10-28 13:58:44 +01003747 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
3748 goto retry;
Chris Wilson426960b2016-01-15 16:51:46 +00003749
Chris Wilsond07f0e52016-10-28 13:58:44 +01003750 err = 0;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003751out:
3752 rcu_read_unlock();
3753 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07003754}
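
/*
 * Minimal userspace usage sketch for the ioctl above (assumes libdrm's
 * drmIoctl() and the uapi struct drm_i915_gem_busy; purely illustrative):
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && busy.busy)
 *		back_off();	(low word: writer, high word: reader mask)
 *
 * back_off() is a placeholder for whatever throttling the client uses.
 */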
3755
3756int
3757i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3758 struct drm_file *file_priv)
3759{
Akshay Joshi0206e352011-08-16 15:34:10 -04003760 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003761}
3762
Chris Wilson3ef94da2009-09-14 16:50:29 +01003763int
3764i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3765 struct drm_file *file_priv)
3766{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003767 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson3ef94da2009-09-14 16:50:29 +01003768 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003769 struct drm_i915_gem_object *obj;
Chris Wilson1233e2d2016-10-28 13:58:37 +01003770 int err;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003771
3772 switch (args->madv) {
3773 case I915_MADV_DONTNEED:
3774 case I915_MADV_WILLNEED:
3775 break;
3776 default:
3777 return -EINVAL;
3778 }
3779
Chris Wilson03ac0642016-07-20 13:31:51 +01003780 obj = i915_gem_object_lookup(file_priv, args->handle);
Chris Wilson1233e2d2016-10-28 13:58:37 +01003781 if (!obj)
3782 return -ENOENT;
3783
3784 err = mutex_lock_interruptible(&obj->mm.lock);
3785 if (err)
3786 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003787
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003788 if (obj->mm.pages &&
Chris Wilson3e510a82016-08-05 10:14:23 +01003789 i915_gem_object_is_tiled(obj) &&
Daniel Vetter656bfa32014-11-20 09:26:30 +01003790 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
Chris Wilsonbc0629a2016-11-01 10:03:17 +00003791 if (obj->mm.madv == I915_MADV_WILLNEED) {
3792 GEM_BUG_ON(!obj->mm.quirked);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003793 __i915_gem_object_unpin_pages(obj);
Chris Wilsonbc0629a2016-11-01 10:03:17 +00003794 obj->mm.quirked = false;
3795 }
3796 if (args->madv == I915_MADV_WILLNEED) {
Chris Wilson2c3a3f42016-11-04 10:30:01 +00003797 GEM_BUG_ON(obj->mm.quirked);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003798 __i915_gem_object_pin_pages(obj);
Chris Wilsonbc0629a2016-11-01 10:03:17 +00003799 obj->mm.quirked = true;
3800 }
Daniel Vetter656bfa32014-11-20 09:26:30 +01003801 }
3802
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003803 if (obj->mm.madv != __I915_MADV_PURGED)
3804 obj->mm.madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003805
Chris Wilson6c085a72012-08-20 11:40:46 +02003806 /* if the object is no longer attached, discard its backing storage */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003807 if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003808 i915_gem_object_truncate(obj);
3809
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003810 args->retained = obj->mm.madv != __I915_MADV_PURGED;
Chris Wilson1233e2d2016-10-28 13:58:37 +01003811 mutex_unlock(&obj->mm.lock);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003812
Chris Wilson1233e2d2016-10-28 13:58:37 +01003813out:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01003814 i915_gem_object_put(obj);
Chris Wilson1233e2d2016-10-28 13:58:37 +01003815 return err;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003816}
3817
Chris Wilson37e680a2012-06-07 15:38:42 +01003818void i915_gem_object_init(struct drm_i915_gem_object *obj,
3819 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01003820{
Chris Wilson1233e2d2016-10-28 13:58:37 +01003821 mutex_init(&obj->mm.lock);
3822
Joonas Lahtinen56cea322016-11-02 12:16:04 +02003823 INIT_LIST_HEAD(&obj->global_link);
Chris Wilson275f0392016-10-24 13:42:14 +01003824 INIT_LIST_HEAD(&obj->userfault_link);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02003825 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07003826 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson8d9d5742015-04-07 16:20:38 +01003827 INIT_LIST_HEAD(&obj->batch_pool_link);
Chris Wilson0327d6b2012-08-11 15:41:06 +01003828
Chris Wilson37e680a2012-06-07 15:38:42 +01003829 obj->ops = ops;
3830
Chris Wilsond07f0e52016-10-28 13:58:44 +01003831 reservation_object_init(&obj->__builtin_resv);
3832 obj->resv = &obj->__builtin_resv;
3833
Chris Wilson50349242016-08-18 17:17:04 +01003834 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003835
3836 obj->mm.madv = I915_MADV_WILLNEED;
3837 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
3838 mutex_init(&obj->mm.get_page.lock);
Chris Wilson0327d6b2012-08-11 15:41:06 +01003839
Dave Gordonf19ec8c2016-07-04 11:34:37 +01003840 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
Chris Wilson0327d6b2012-08-11 15:41:06 +01003841}
3842
Chris Wilson37e680a2012-06-07 15:38:42 +01003843static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
Tvrtko Ursulin3599a912016-11-01 14:44:10 +00003844 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
3845 I915_GEM_OBJECT_IS_SHRINKABLE,
Chris Wilson37e680a2012-06-07 15:38:42 +01003846 .get_pages = i915_gem_object_get_pages_gtt,
3847 .put_pages = i915_gem_object_put_pages_gtt,
3848};
3849
Chris Wilsonb4bcbe22016-10-18 13:02:49 +01003850/* Note we don't consider signbits :| */
3851#define overflows_type(x, T) \
3852 (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
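
/*
 * Illustrative example (not from the original source): on a 32-bit kernel,
 * where obj->base.size is a 32-bit size_t, a request such as
 * i915_gem_object_create(dev, 8ull << 30) trips overflows_type() below and
 * fails with -E2BIG instead of silently truncating the object size.
 */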
3853
3854struct drm_i915_gem_object *
3855i915_gem_object_create(struct drm_device *dev, u64 size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00003856{
Ville Syrjäläa26e5232016-10-31 22:37:19 +02003857 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00003858 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07003859 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01003860 gfp_t mask;
Chris Wilsonfe3db792016-04-25 13:32:13 +01003861 int ret;
Daniel Vetterc397b902010-04-09 19:05:07 +00003862
Chris Wilsonb4bcbe22016-10-18 13:02:49 +01003863 /* There is a prevalence of the assumption that we fit the object's
3864 * page count inside a 32bit _signed_ variable. Let's document this and
3865 * catch if we ever need to fix it. In the meantime, if you do spot
3866 * such a local variable, please consider fixing!
3867 */
3868 if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
3869 return ERR_PTR(-E2BIG);
3870
3871 if (overflows_type(size, obj->base.size))
3872 return ERR_PTR(-E2BIG);
3873
Chris Wilson42dcedd2012-11-15 11:32:30 +00003874 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00003875 if (obj == NULL)
Chris Wilsonfe3db792016-04-25 13:32:13 +01003876 return ERR_PTR(-ENOMEM);
Daniel Vetterc397b902010-04-09 19:05:07 +00003877
Chris Wilsonfe3db792016-04-25 13:32:13 +01003878 ret = drm_gem_object_init(dev, &obj->base, size);
3879 if (ret)
3880 goto fail;
Daniel Vetterc397b902010-04-09 19:05:07 +00003881
Chris Wilsonbed1ea92012-05-24 20:48:12 +01003882 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
Ville Syrjäläa26e5232016-10-31 22:37:19 +02003883 if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
Chris Wilsonbed1ea92012-05-24 20:48:12 +01003884 /* 965gm cannot relocate objects above 4GiB. */
3885 mask &= ~__GFP_HIGHMEM;
3886 mask |= __GFP_DMA32;
3887 }
3888
Al Viro93c76a32015-12-04 23:45:44 -05003889 mapping = obj->base.filp->f_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01003890 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07003891
Chris Wilson37e680a2012-06-07 15:38:42 +01003892 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01003893
Daniel Vetterc397b902010-04-09 19:05:07 +00003894 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3895 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3896
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00003897 if (HAS_LLC(dev_priv)) {
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02003898 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07003899 * cache) for about a 10% performance improvement
3900 * compared to uncached. Graphics requests other than
3901 * display scanout are coherent with the CPU in
3902 * accessing this cache. This means in this mode we
3903 * don't need to clflush on the CPU side, and on the
3904 * GPU side we only need to flush internal caches to
3905 * get data visible to the CPU.
3906 *
3907 * However, we maintain the display planes as UC, and so
3908 * need to rebind when first used as such.
3909 */
3910 obj->cache_level = I915_CACHE_LLC;
3911 } else
3912 obj->cache_level = I915_CACHE_NONE;
3913
Daniel Vetterd861e332013-07-24 23:25:03 +02003914 trace_i915_gem_object_create(obj);
3915
Chris Wilson05394f32010-11-08 19:18:58 +00003916 return obj;
Chris Wilsonfe3db792016-04-25 13:32:13 +01003917
3918fail:
3919 i915_gem_object_free(obj);
Chris Wilsonfe3db792016-04-25 13:32:13 +01003920 return ERR_PTR(ret);
Daniel Vetterac52bc52010-04-09 19:05:06 +00003921}
3922
Chris Wilson340fbd82014-05-22 09:16:52 +01003923static bool discard_backing_storage(struct drm_i915_gem_object *obj)
3924{
3925 /* If we are the last user of the backing storage (be it shmemfs
3926 * pages or stolen etc), we know that the pages are going to be
3927 * immediately released. In this case, we can then skip copying
3928 * back the contents from the GPU.
3929 */
3930
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003931 if (obj->mm.madv != I915_MADV_WILLNEED)
Chris Wilson340fbd82014-05-22 09:16:52 +01003932 return false;
3933
3934 if (obj->base.filp == NULL)
3935 return true;
3936
3937 /* At first glance, this looks racy, but then again so would be
3938 * userspace racing mmap against close. However, the first external
3939 * reference to the filp can only be obtained through the
3940 * i915_gem_mmap_ioctl() which safeguards us against the user
3941 * acquiring such a reference whilst we are in the middle of
3942 * freeing the object.
3943 */
3944 return atomic_long_read(&obj->base.filp->f_count) == 1;
3945}
3946
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003947static void __i915_gem_free_objects(struct drm_i915_private *i915,
3948 struct llist_node *freed)
Chris Wilsonbe726152010-07-23 23:18:50 +01003949{
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003950 struct drm_i915_gem_object *obj, *on;
Chris Wilsonbe726152010-07-23 23:18:50 +01003951
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003952 mutex_lock(&i915->drm.struct_mutex);
3953 intel_runtime_pm_get(i915);
3954 llist_for_each_entry(obj, freed, freed) {
3955 struct i915_vma *vma, *vn;
Paulo Zanonif65c9162013-11-27 18:20:34 -02003956
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003957 trace_i915_gem_object_destroy(obj);
3958
3959 GEM_BUG_ON(i915_gem_object_is_active(obj));
3960 list_for_each_entry_safe(vma, vn,
3961 &obj->vma_list, obj_link) {
3962 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
3963 GEM_BUG_ON(i915_vma_is_active(vma));
3964 vma->flags &= ~I915_VMA_PIN_MASK;
3965 i915_vma_close(vma);
3966 }
Chris Wilsondb6c2b42016-11-01 11:54:00 +00003967 GEM_BUG_ON(!list_empty(&obj->vma_list));
3968 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003969
Joonas Lahtinen56cea322016-11-02 12:16:04 +02003970 list_del(&obj->global_link);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003971 }
3972 intel_runtime_pm_put(i915);
3973 mutex_unlock(&i915->drm.struct_mutex);
3974
3975 llist_for_each_entry_safe(obj, on, freed, freed) {
3976 GEM_BUG_ON(obj->bind_count);
3977 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
3978
3979 if (obj->ops->release)
3980 obj->ops->release(obj);
3981
3982 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
3983 atomic_set(&obj->mm.pages_pin_count, 0);
Chris Wilson548625e2016-11-01 12:11:34 +00003984 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003985 GEM_BUG_ON(obj->mm.pages);
3986
3987 if (obj->base.import_attach)
3988 drm_prime_gem_destroy(&obj->base, NULL);
3989
Chris Wilsond07f0e52016-10-28 13:58:44 +01003990 reservation_object_fini(&obj->__builtin_resv);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003991 drm_gem_object_release(&obj->base);
3992 i915_gem_info_remove_obj(i915, obj->base.size);
3993
3994 kfree(obj->bit_17);
3995 i915_gem_object_free(obj);
3996 }
3997}
3998
3999static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4000{
4001 struct llist_node *freed;
4002
4003 freed = llist_del_all(&i915->mm.free_list);
4004 if (unlikely(freed))
4005 __i915_gem_free_objects(i915, freed);
4006}
4007
4008static void __i915_gem_free_work(struct work_struct *work)
4009{
4010 struct drm_i915_private *i915 =
4011 container_of(work, struct drm_i915_private, mm.free_work);
4012 struct llist_node *freed;
Chris Wilson26e12f82011-03-20 11:20:19 +00004013
Chris Wilsonb1f788c2016-08-04 07:52:45 +01004014 /* All file-owned VMA should have been released by this point through
4015 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4016 * However, the object may also be bound into the global GTT (e.g.
4017 * older GPUs without per-process support, or for direct access through
4018 * the GTT either for the user or for scanout). Those VMA still need to
4019	 * be unbound now.
4020 */
Chris Wilson1488fc02012-04-24 15:47:31 +01004021
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004022 while ((freed = llist_del_all(&i915->mm.free_list)))
4023 __i915_gem_free_objects(i915, freed);
4024}
4025
4026static void __i915_gem_free_object_rcu(struct rcu_head *head)
4027{
4028 struct drm_i915_gem_object *obj =
4029 container_of(head, typeof(*obj), rcu);
4030 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4031
4032 /* We can't simply use call_rcu() from i915_gem_free_object()
4033 * as we need to block whilst unbinding, and the call_rcu
4034 * task may be called from softirq context. So we take a
4035 * detour through a worker.
4036 */
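	/*
	 * llist_add() returns true only when this node made the list
	 * non-empty, so the worker is scheduled once per batch of freed
	 * objects rather than once per object; __i915_gem_free_work()
	 * then drains the whole list.
	 */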
4037 if (llist_add(&obj->freed, &i915->mm.free_list))
4038 schedule_work(&i915->mm.free_work);
4039}
4040
4041void i915_gem_free_object(struct drm_gem_object *gem_obj)
4042{
4043 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4044
Chris Wilsonbc0629a2016-11-01 10:03:17 +00004045 if (obj->mm.quirked)
4046 __i915_gem_object_unpin_pages(obj);
4047
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004048 if (discard_backing_storage(obj))
4049 obj->mm.madv = I915_MADV_DONTNEED;
Daniel Vettera071fa02014-06-18 23:28:09 +02004050
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004051 /* Before we free the object, make sure any pure RCU-only
4052 * read-side critical sections are complete, e.g.
4053 * i915_gem_busy_ioctl(). For the corresponding synchronized
4054 * lookup see i915_gem_object_lookup_rcu().
4055 */
4056 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
Chris Wilsonbe726152010-07-23 23:18:50 +01004057}
4058
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01004059void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4060{
4061 lockdep_assert_held(&obj->base.dev->struct_mutex);
4062
4063 GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
4064 if (i915_gem_object_is_active(obj))
4065 i915_gem_object_set_active_reference(obj);
4066 else
4067 i915_gem_object_put(obj);
4068}
4069
Chris Wilson3033aca2016-10-28 13:58:47 +01004070static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
4071{
4072 struct intel_engine_cs *engine;
4073 enum intel_engine_id id;
4074
4075 for_each_engine(engine, dev_priv, id)
4076 GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
4077}
4078
Chris Wilsondcff85c2016-08-05 10:14:11 +01004079int i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004080{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004081 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsondcff85c2016-08-05 10:14:11 +01004082 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004083
Chris Wilson54b4f682016-07-21 21:16:19 +01004084 intel_suspend_gt_powersave(dev_priv);
4085
Chris Wilson45c5f202013-10-16 11:50:01 +01004086 mutex_lock(&dev->struct_mutex);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004087
4088 /* We have to flush all the executing contexts to main memory so
4089	 * that they can be saved in the hibernation image. To ensure the last
4090 * context image is coherent, we have to switch away from it. That
4091 * leaves the dev_priv->kernel_context still active when
4092 * we actually suspend, and its image in memory may not match the GPU
4093 * state. Fortunately, the kernel_context is disposable and we do
4094 * not rely on its state.
4095 */
4096 ret = i915_gem_switch_to_kernel_context(dev_priv);
4097 if (ret)
4098 goto err;
4099
Chris Wilson22dd3bb2016-09-09 14:11:50 +01004100 ret = i915_gem_wait_for_idle(dev_priv,
4101 I915_WAIT_INTERRUPTIBLE |
4102 I915_WAIT_LOCKED);
Chris Wilsonf7403342013-09-13 23:57:04 +01004103 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004104 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004105
Chris Wilsonc0336662016-05-06 15:40:21 +01004106 i915_gem_retire_requests(dev_priv);
Chris Wilson28176ef2016-10-28 13:58:56 +01004107 GEM_BUG_ON(dev_priv->gt.active_requests);
Eric Anholt673a3942008-07-30 12:06:12 -07004108
Chris Wilson3033aca2016-10-28 13:58:47 +01004109 assert_kernel_context_is_current(dev_priv);
Chris Wilsonb2e862d2016-04-28 09:56:41 +01004110 i915_gem_context_lost(dev_priv);
Chris Wilson45c5f202013-10-16 11:50:01 +01004111 mutex_unlock(&dev->struct_mutex);
4112
Chris Wilson737b1502015-01-26 18:03:03 +02004113 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
Chris Wilson67d97da2016-07-04 08:08:31 +01004114 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4115 flush_delayed_work(&dev_priv->gt.idle_work);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004116 flush_work(&dev_priv->mm.free_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004117
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004118	/* Assert that we successfully flushed all the work and
4119 * reset the GPU back to its idle, low power state.
4120 */
Chris Wilson67d97da2016-07-04 08:08:31 +01004121 WARN_ON(dev_priv->gt.awake);
Imre Deak31ab49a2016-11-07 11:20:05 +02004122 WARN_ON(!intel_execlists_idle(dev_priv));
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004123
Imre Deak1c777c52016-10-12 17:46:37 +03004124 /*
4125	 * Neither the BIOS, ourselves, nor any other kernel
4126 * expects the system to be in execlists mode on startup,
4127 * so we need to reset the GPU back to legacy mode. And the only
4128 * known way to disable logical contexts is through a GPU reset.
4129 *
4130 * So in order to leave the system in a known default configuration,
4131 * always reset the GPU upon unload and suspend. Afterwards we then
4132 * clean up the GEM state tracking, flushing off the requests and
4133 * leaving the system in a known idle state.
4134 *
4135	 * Note that it is of the utmost importance that the GPU is idle and
4136 * all stray writes are flushed *before* we dismantle the backing
4137 * storage for the pinned objects.
4138 *
4139 * However, since we are uncertain that resetting the GPU on older
4140 * machines is a good idea, we don't - just in case it leaves the
4141 * machine in an unusable condition.
4142 */
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00004143 if (HAS_HW_CONTEXTS(dev_priv)) {
Imre Deak1c777c52016-10-12 17:46:37 +03004144 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
4145 WARN_ON(reset && reset != -ENODEV);
4146 }
4147
Eric Anholt673a3942008-07-30 12:06:12 -07004148 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004149
4150err:
4151 mutex_unlock(&dev->struct_mutex);
4152 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004153}
4154
Chris Wilson5ab57c72016-07-15 14:56:20 +01004155void i915_gem_resume(struct drm_device *dev)
4156{
4157 struct drm_i915_private *dev_priv = to_i915(dev);
4158
Imre Deak31ab49a2016-11-07 11:20:05 +02004159 WARN_ON(dev_priv->gt.awake);
4160
Chris Wilson5ab57c72016-07-15 14:56:20 +01004161 mutex_lock(&dev->struct_mutex);
4162 i915_gem_restore_gtt_mappings(dev);
4163
4164 /* As we didn't flush the kernel context before suspend, we cannot
4165 * guarantee that the context image is complete. So let's just reset
4166 * it and start again.
4167 */
Chris Wilson821ed7d2016-09-09 14:11:53 +01004168 dev_priv->gt.resume(dev_priv);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004169
4170 mutex_unlock(&dev->struct_mutex);
4171}
4172
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004173void i915_gem_init_swizzling(struct drm_device *dev)
4174{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004175 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004176
Daniel Vetter11782b02012-01-31 16:47:55 +01004177 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004178 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4179 return;
4180
4181 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4182 DISP_TILE_SURFACE_SWIZZLING);
4183
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004184 if (IS_GEN5(dev_priv))
Daniel Vetter11782b02012-01-31 16:47:55 +01004185 return;
4186
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004187 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004188 if (IS_GEN6(dev_priv))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004189 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004190 else if (IS_GEN7(dev_priv))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004191 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004192 else if (IS_GEN8(dev_priv))
Ben Widawsky31a53362013-11-02 21:07:04 -07004193 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004194 else
4195 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004196}
Daniel Vettere21af882012-02-09 20:53:27 +01004197
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004198static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004199{
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004200 I915_WRITE(RING_CTL(base), 0);
4201 I915_WRITE(RING_HEAD(base), 0);
4202 I915_WRITE(RING_TAIL(base), 0);
4203 I915_WRITE(RING_START(base), 0);
4204}
4205
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004206static void init_unused_rings(struct drm_i915_private *dev_priv)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004207{
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004208 if (IS_I830(dev_priv)) {
4209 init_unused_ring(dev_priv, PRB1_BASE);
4210 init_unused_ring(dev_priv, SRB0_BASE);
4211 init_unused_ring(dev_priv, SRB1_BASE);
4212 init_unused_ring(dev_priv, SRB2_BASE);
4213 init_unused_ring(dev_priv, SRB3_BASE);
4214 } else if (IS_GEN2(dev_priv)) {
4215 init_unused_ring(dev_priv, SRB0_BASE);
4216 init_unused_ring(dev_priv, SRB1_BASE);
4217 } else if (IS_GEN3(dev_priv)) {
4218 init_unused_ring(dev_priv, PRB1_BASE);
4219 init_unused_ring(dev_priv, PRB2_BASE);
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004220 }
4221}
4222
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004223int
4224i915_gem_init_hw(struct drm_device *dev)
4225{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004226 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004227 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05304228 enum intel_engine_id id;
Chris Wilsond200cda2016-04-28 09:56:44 +01004229 int ret;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004230
Chris Wilsonde867c22016-10-25 13:16:02 +01004231 dev_priv->gt.last_init_time = ktime_get();
4232
Chris Wilson5e4f5182015-02-13 14:35:59 +00004233 /* Double layer security blanket, see i915_gem_init() */
4234 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4235
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00004236 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004237 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004238
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01004239 if (IS_HASWELL(dev_priv))
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004240 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004241 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004242
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004243 if (HAS_PCH_NOP(dev_priv)) {
Tvrtko Ursulinfd6b8f42016-10-14 10:13:06 +01004244 if (IS_IVYBRIDGE(dev_priv)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004245 u32 temp = I915_READ(GEN7_MSG_CTL);
4246 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4247 I915_WRITE(GEN7_MSG_CTL, temp);
4248 } else if (INTEL_INFO(dev)->gen >= 7) {
4249 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4250 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4251 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4252 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004253 }
4254
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004255 i915_gem_init_swizzling(dev);
4256
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01004257 /*
4258 * At least 830 can leave some of the unused rings
4259	 * "active" (i.e. head != tail) after resume, which
4260	 * will prevent c3 entry. Make sure all unused rings
4261 * are totally idle.
4262 */
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004263 init_unused_rings(dev_priv);
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01004264
Dave Gordoned54c1a2016-01-19 19:02:54 +00004265 BUG_ON(!dev_priv->kernel_context);
John Harrison90638cc2015-05-29 17:43:37 +01004266
John Harrison4ad2fd82015-06-18 13:11:20 +01004267 ret = i915_ppgtt_init_hw(dev);
4268 if (ret) {
4269 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4270 goto out;
4271 }
4272
4273 /* Need to do basic initialisation of all rings first: */
Akash Goel3b3f1652016-10-13 22:44:48 +05304274 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004275 ret = engine->init_hw(engine);
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004276 if (ret)
Chris Wilson5e4f5182015-02-13 14:35:59 +00004277 goto out;
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004278 }
Mika Kuoppala99433932013-01-22 14:12:17 +02004279
Peter Antoine0ccdacf2016-04-13 15:03:25 +01004280 intel_mocs_init_l3cc_table(dev);
4281
Alex Dai33a732f2015-08-12 15:43:36 +01004282 /* We can't enable contexts until all firmware is loaded */
Dave Gordone556f7c2016-06-07 09:14:49 +01004283 ret = intel_guc_setup(dev);
4284 if (ret)
4285 goto out;
Alex Dai33a732f2015-08-12 15:43:36 +01004286
Chris Wilson5e4f5182015-02-13 14:35:59 +00004287out:
4288 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004289 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004290}
4291
Chris Wilson39df9192016-07-20 13:31:57 +01004292bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4293{
4294 if (INTEL_INFO(dev_priv)->gen < 6)
4295 return false;
4296
4297 /* TODO: make semaphores and Execlists play nicely together */
4298 if (i915.enable_execlists)
4299 return false;
4300
4301 if (value >= 0)
4302 return value;
4303
4304#ifdef CONFIG_INTEL_IOMMU
4305 /* Enable semaphores on SNB when IO remapping is off */
4306 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4307 return false;
4308#endif
4309
4310 return true;
4311}
4312
Chris Wilson1070a422012-04-24 15:47:41 +01004313int i915_gem_init(struct drm_device *dev)
4314{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004315 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson1070a422012-04-24 15:47:41 +01004316 int ret;
4317
Chris Wilson1070a422012-04-24 15:47:41 +01004318 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004319
Oscar Mateoa83014d2014-07-24 17:04:21 +01004320 if (!i915.enable_execlists) {
Chris Wilson821ed7d2016-09-09 14:11:53 +01004321 dev_priv->gt.resume = intel_legacy_submission_resume;
Chris Wilson7e37f882016-08-02 22:50:21 +01004322 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
Oscar Mateo454afeb2014-07-24 17:04:22 +01004323 } else {
Chris Wilson821ed7d2016-09-09 14:11:53 +01004324 dev_priv->gt.resume = intel_lr_context_resume;
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004325 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
Oscar Mateoa83014d2014-07-24 17:04:21 +01004326 }
4327
Chris Wilson5e4f5182015-02-13 14:35:59 +00004328 /* This is just a security blanket to placate dragons.
4329 * On some systems, we very sporadically observe that the first TLBs
4330 * used by the CS may be stale, despite us poking the TLB reset. If
4331 * we hold the forcewake during initialisation these problems
4332 * just magically go away.
4333 */
4334 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4335
Chris Wilson72778cb2016-05-19 16:17:16 +01004336 i915_gem_init_userptr(dev_priv);
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01004337
4338 ret = i915_gem_init_ggtt(dev_priv);
4339 if (ret)
4340 goto out_unlock;
Jesse Barnesd62b4892013-03-08 10:45:53 -08004341
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004342 ret = i915_gem_context_init(dev);
Jani Nikula7bcc3772014-12-05 14:17:42 +02004343 if (ret)
4344 goto out_unlock;
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004345
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01004346 ret = intel_engines_init(dev);
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004347 if (ret)
Jani Nikula7bcc3772014-12-05 14:17:42 +02004348 goto out_unlock;
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004349
4350 ret = i915_gem_init_hw(dev);
Chris Wilson60990322014-04-09 09:19:42 +01004351 if (ret == -EIO) {
Chris Wilson7e21d642016-07-27 09:07:29 +01004352 /* Allow engine initialisation to fail by marking the GPU as
Chris Wilson60990322014-04-09 09:19:42 +01004353 * wedged. But we only want to do this where the GPU is angry,
4354	 * for all other failures, such as an allocation failure, bail.
4355 */
4356 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
Chris Wilson821ed7d2016-09-09 14:11:53 +01004357 i915_gem_set_wedged(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01004358 ret = 0;
Chris Wilson1070a422012-04-24 15:47:41 +01004359 }
Jani Nikula7bcc3772014-12-05 14:17:42 +02004360
4361out_unlock:
Chris Wilson5e4f5182015-02-13 14:35:59 +00004362 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Chris Wilson60990322014-04-09 09:19:42 +01004363 mutex_unlock(&dev->struct_mutex);
Chris Wilson1070a422012-04-24 15:47:41 +01004364
Chris Wilson60990322014-04-09 09:19:42 +01004365 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01004366}
4367
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004368void
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004369i915_gem_cleanup_engines(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004370{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004371 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004372 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05304373 enum intel_engine_id id;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004374
Akash Goel3b3f1652016-10-13 22:44:48 +05304375 for_each_engine(engine, dev_priv, id)
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004376 dev_priv->gt.cleanup_engine(engine);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004377}
4378
Eric Anholt673a3942008-07-30 12:06:12 -07004379void
Imre Deak40ae4e12016-03-16 14:54:03 +02004380i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4381{
Chris Wilson91c8a322016-07-05 10:40:23 +01004382 struct drm_device *dev = &dev_priv->drm;
Chris Wilson49ef5292016-08-18 17:17:00 +01004383 int i;
Imre Deak40ae4e12016-03-16 14:54:03 +02004384
4385 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4386 !IS_CHERRYVIEW(dev_priv))
4387 dev_priv->num_fence_regs = 32;
4388 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4389 IS_I945GM(dev_priv) || IS_G33(dev_priv))
4390 dev_priv->num_fence_regs = 16;
4391 else
4392 dev_priv->num_fence_regs = 8;
4393
Chris Wilsonc0336662016-05-06 15:40:21 +01004394 if (intel_vgpu_active(dev_priv))
Imre Deak40ae4e12016-03-16 14:54:03 +02004395 dev_priv->num_fence_regs =
4396 I915_READ(vgtif_reg(avail_rs.fence_num));
4397
4398 /* Initialize fence registers to zero */
Chris Wilson49ef5292016-08-18 17:17:00 +01004399 for (i = 0; i < dev_priv->num_fence_regs; i++) {
4400 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4401
4402 fence->i915 = dev_priv;
4403 fence->id = i;
4404 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4405 }
Imre Deak40ae4e12016-03-16 14:54:03 +02004406 i915_gem_restore_fences(dev);
4407
4408 i915_gem_detect_bit_6_swizzle(dev);
4409}
4410
Chris Wilson73cb9702016-10-28 13:58:46 +01004411int
Imre Deakd64aa092016-01-19 15:26:29 +02004412i915_gem_load_init(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004413{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004414 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004415 int err = -ENOMEM;
Chris Wilson42dcedd2012-11-15 11:32:30 +00004416
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004417 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
4418 if (!dev_priv->objects)
Chris Wilson73cb9702016-10-28 13:58:46 +01004419 goto err_out;
Chris Wilson73cb9702016-10-28 13:58:46 +01004420
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004421 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
4422 if (!dev_priv->vmas)
Chris Wilson73cb9702016-10-28 13:58:46 +01004423 goto err_objects;
Chris Wilson73cb9702016-10-28 13:58:46 +01004424
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004425 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
4426 SLAB_HWCACHE_ALIGN |
4427 SLAB_RECLAIM_ACCOUNT |
4428 SLAB_DESTROY_BY_RCU);
4429 if (!dev_priv->requests)
Chris Wilson73cb9702016-10-28 13:58:46 +01004430 goto err_vmas;
Chris Wilson73cb9702016-10-28 13:58:46 +01004431
4432 mutex_lock(&dev_priv->drm.struct_mutex);
4433 INIT_LIST_HEAD(&dev_priv->gt.timelines);
4434 err = i915_gem_timeline_init(dev_priv,
4435 &dev_priv->gt.global_timeline,
4436 "[execution]");
4437 mutex_unlock(&dev_priv->drm.struct_mutex);
4438 if (err)
4439 goto err_requests;
Eric Anholt673a3942008-07-30 12:06:12 -07004440
Ben Widawskya33afea2013-09-17 21:12:45 -07004441 INIT_LIST_HEAD(&dev_priv->context_list);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004442 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
4443 init_llist_head(&dev_priv->mm.free_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02004444 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4445 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004446 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson275f0392016-10-24 13:42:14 +01004447 INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
Chris Wilson67d97da2016-07-04 08:08:31 +01004448 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
Eric Anholt673a3942008-07-30 12:06:12 -07004449 i915_gem_retire_work_handler);
Chris Wilson67d97da2016-07-04 08:08:31 +01004450 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004451 i915_gem_idle_work_handler);
Chris Wilson1f15b762016-07-01 17:23:14 +01004452 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004453 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01004454
Chris Wilson72bfa192010-12-19 11:42:05 +00004455 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4456
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004457 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01004458
Chris Wilsonce453d82011-02-21 14:43:56 +00004459 dev_priv->mm.interruptible = true;
4460
Joonas Lahtinen6f633402016-09-01 14:58:21 +03004461 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
4462
Chris Wilsonb5add952016-08-04 16:32:36 +01004463 spin_lock_init(&dev_priv->fb_tracking.lock);
Chris Wilson73cb9702016-10-28 13:58:46 +01004464
4465 return 0;
4466
4467err_requests:
4468 kmem_cache_destroy(dev_priv->requests);
4469err_vmas:
4470 kmem_cache_destroy(dev_priv->vmas);
4471err_objects:
4472 kmem_cache_destroy(dev_priv->objects);
4473err_out:
4474 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07004475}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004476
Imre Deakd64aa092016-01-19 15:26:29 +02004477void i915_gem_load_cleanup(struct drm_device *dev)
4478{
4479 struct drm_i915_private *dev_priv = to_i915(dev);
4480
Chris Wilson7d5d59e2016-11-01 08:48:41 +00004481 WARN_ON(!llist_empty(&dev_priv->mm.free_list));
4482
Imre Deakd64aa092016-01-19 15:26:29 +02004483 kmem_cache_destroy(dev_priv->requests);
4484 kmem_cache_destroy(dev_priv->vmas);
4485 kmem_cache_destroy(dev_priv->objects);
Chris Wilson0eafec62016-08-04 16:32:41 +01004486
4487 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
4488 rcu_barrier();
Imre Deakd64aa092016-01-19 15:26:29 +02004489}
4490
Chris Wilson6a800ea2016-09-21 14:51:07 +01004491int i915_gem_freeze(struct drm_i915_private *dev_priv)
4492{
4493 intel_runtime_pm_get(dev_priv);
4494
4495 mutex_lock(&dev_priv->drm.struct_mutex);
4496 i915_gem_shrink_all(dev_priv);
4497 mutex_unlock(&dev_priv->drm.struct_mutex);
4498
4499 intel_runtime_pm_put(dev_priv);
4500
4501 return 0;
4502}
4503
Chris Wilson461fb992016-05-14 07:26:33 +01004504int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4505{
4506 struct drm_i915_gem_object *obj;
Chris Wilson7aab2d52016-09-09 20:02:18 +01004507 struct list_head *phases[] = {
4508 &dev_priv->mm.unbound_list,
4509 &dev_priv->mm.bound_list,
4510 NULL
4511 }, **p;
Chris Wilson461fb992016-05-14 07:26:33 +01004512
4513 /* Called just before we write the hibernation image.
4514 *
4515 * We need to update the domain tracking to reflect that the CPU
4516 * will be accessing all the pages to create and restore from the
4517 * hibernation, and so upon restoration those pages will be in the
4518 * CPU domain.
4519 *
4520 * To make sure the hibernation image contains the latest state,
4521 * we update that state just before writing out the image.
Chris Wilson7aab2d52016-09-09 20:02:18 +01004522 *
4523 * To try and reduce the hibernation image, we manually shrink
4524 * the objects as well.
Chris Wilson461fb992016-05-14 07:26:33 +01004525 */
4526
Chris Wilson6a800ea2016-09-21 14:51:07 +01004527 mutex_lock(&dev_priv->drm.struct_mutex);
4528 i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
Chris Wilson461fb992016-05-14 07:26:33 +01004529
Chris Wilson7aab2d52016-09-09 20:02:18 +01004530 for (p = phases; *p; p++) {
Joonas Lahtinen56cea322016-11-02 12:16:04 +02004531 list_for_each_entry(obj, *p, global_link) {
Chris Wilson7aab2d52016-09-09 20:02:18 +01004532 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4533 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4534 }
Chris Wilson461fb992016-05-14 07:26:33 +01004535 }
Chris Wilson6a800ea2016-09-21 14:51:07 +01004536 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson461fb992016-05-14 07:26:33 +01004537
4538 return 0;
4539}
4540
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004541void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004542{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004543 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilson15f7bbc2016-07-26 12:01:52 +01004544 struct drm_i915_gem_request *request;
Eric Anholtb9624422009-06-03 07:27:35 +00004545
4546 /* Clean up our request list when the client is going away, so that
4547 * later retire_requests won't dereference our soon-to-be-gone
4548 * file_priv.
4549 */
Chris Wilson1c255952010-09-26 11:03:27 +01004550 spin_lock(&file_priv->mm.lock);
Chris Wilson15f7bbc2016-07-26 12:01:52 +01004551 list_for_each_entry(request, &file_priv->mm.request_list, client_list)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004552 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01004553 spin_unlock(&file_priv->mm.lock);
Chris Wilson31169712009-09-14 16:50:28 +01004554
Chris Wilson2e1b8732015-04-27 13:41:22 +01004555 if (!list_empty(&file_priv->rps.link)) {
Chris Wilson8d3afd72015-05-21 21:01:47 +01004556 spin_lock(&to_i915(dev)->rps.client_lock);
Chris Wilson2e1b8732015-04-27 13:41:22 +01004557 list_del(&file_priv->rps.link);
Chris Wilson8d3afd72015-05-21 21:01:47 +01004558 spin_unlock(&to_i915(dev)->rps.client_lock);
Chris Wilson1854d5c2015-04-07 16:20:32 +01004559 }
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004560}
4561
4562int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4563{
4564 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08004565 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004566
4567 DRM_DEBUG_DRIVER("\n");
4568
4569 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4570 if (!file_priv)
4571 return -ENOMEM;
4572
4573 file->driver_priv = file_priv;
Dave Gordonf19ec8c2016-07-04 11:34:37 +01004574 file_priv->dev_priv = to_i915(dev);
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02004575 file_priv->file = file;
Chris Wilson2e1b8732015-04-27 13:41:22 +01004576 INIT_LIST_HEAD(&file_priv->rps.link);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004577
4578 spin_lock_init(&file_priv->mm.lock);
4579 INIT_LIST_HEAD(&file_priv->mm.request_list);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004580
Chris Wilsonc80ff162016-07-27 09:07:27 +01004581 file_priv->bsd_engine = -1;
Tvrtko Ursulinde1add32016-01-15 15:12:50 +00004582
Ben Widawskye422b882013-12-06 14:10:58 -08004583 ret = i915_gem_context_open(dev, file);
4584 if (ret)
4585 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004586
Ben Widawskye422b882013-12-06 14:10:58 -08004587 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004588}
4589
Daniel Vetterb680c372014-09-19 18:27:27 +02004590/**
4591 * i915_gem_track_fb - update frontbuffer tracking
Geliang Tangd9072a32015-09-15 05:58:44 -07004592 * @old: current GEM buffer for the frontbuffer slots
4593 * @new: new GEM buffer for the frontbuffer slots
4594 * @frontbuffer_bits: bitmask of frontbuffer slots
Daniel Vetterb680c372014-09-19 18:27:27 +02004595 *
4596 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4597 * from @old and setting them in @new. Both @old and @new can be NULL.
4598 */
Daniel Vettera071fa02014-06-18 23:28:09 +02004599void i915_gem_track_fb(struct drm_i915_gem_object *old,
4600 struct drm_i915_gem_object *new,
4601 unsigned frontbuffer_bits)
4602{
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004603	/* Control of individual bits within the mask is guarded by
4604 * the owning plane->mutex, i.e. we can never see concurrent
4605 * manipulation of individual bits. But since the bitfield as a whole
4606 * is updated using RMW, we need to use atomics in order to update
4607 * the bits.
4608 */
4609 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
4610 sizeof(atomic_t) * BITS_PER_BYTE);
4611
Daniel Vettera071fa02014-06-18 23:28:09 +02004612 if (old) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004613 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
4614 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02004615 }
4616
4617 if (new) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004618 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
4619 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02004620 }
4621}
4622
Dave Gordonea702992015-07-09 19:29:02 +01004623/* Allocate a new GEM object and fill it with the supplied data */
4624struct drm_i915_gem_object *
4625i915_gem_object_create_from_data(struct drm_device *dev,
4626 const void *data, size_t size)
4627{
4628 struct drm_i915_gem_object *obj;
4629 struct sg_table *sg;
4630 size_t bytes;
4631 int ret;
4632
Dave Gordond37cd8a2016-04-22 19:14:32 +01004633 obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
Chris Wilsonfe3db792016-04-25 13:32:13 +01004634 if (IS_ERR(obj))
Dave Gordonea702992015-07-09 19:29:02 +01004635 return obj;
4636
4637 ret = i915_gem_object_set_to_cpu_domain(obj, true);
4638 if (ret)
4639 goto fail;
4640
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004641 ret = i915_gem_object_pin_pages(obj);
Dave Gordonea702992015-07-09 19:29:02 +01004642 if (ret)
4643 goto fail;
4644
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004645 sg = obj->mm.pages;
Dave Gordonea702992015-07-09 19:29:02 +01004646 bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004647 obj->mm.dirty = true; /* Backing store is now out of date */
Dave Gordonea702992015-07-09 19:29:02 +01004648 i915_gem_object_unpin_pages(obj);
4649
4650 if (WARN_ON(bytes != size)) {
4651 DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
4652 ret = -EFAULT;
4653 goto fail;
4654 }
4655
4656 return obj;
4657
4658fail:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01004659 i915_gem_object_put(obj);
Dave Gordonea702992015-07-09 19:29:02 +01004660 return ERR_PTR(ret);
4661}
Chris Wilson96d77632016-10-28 13:58:33 +01004662
4663struct scatterlist *
4664i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
4665 unsigned int n,
4666 unsigned int *offset)
4667{
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004668 struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
Chris Wilson96d77632016-10-28 13:58:33 +01004669 struct scatterlist *sg;
4670 unsigned int idx, count;
4671
4672 might_sleep();
4673 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004674 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
Chris Wilson96d77632016-10-28 13:58:33 +01004675
4676 /* As we iterate forward through the sg, we record each entry in a
4677 * radixtree for quick repeated (backwards) lookups. If we have seen
4678 * this index previously, we will have an entry for it.
4679 *
4680 * Initial lookup is O(N), but this is amortized to O(1) for
4681 * sequential page access (where each new request is consecutive
4682 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
4683 * i.e. O(1) with a large constant!
4684 */
4685 if (n < READ_ONCE(iter->sg_idx))
4686 goto lookup;
4687
4688 mutex_lock(&iter->lock);
4689
4690	/* We prefer to reuse the last sg so that repeated lookups of this
4691 * (or the subsequent) sg are fast - comparing against the last
4692 * sg is faster than going through the radixtree.
4693 */
4694
4695 sg = iter->sg_pos;
4696 idx = iter->sg_idx;
4697 count = __sg_page_count(sg);
4698
4699 while (idx + count <= n) {
4700 unsigned long exception, i;
4701 int ret;
4702
4703 /* If we cannot allocate and insert this entry, or the
4704 * individual pages from this range, cancel updating the
4705 * sg_idx so that on this lookup we are forced to linearly
4706 * scan onwards, but on future lookups we will try the
4707 * insertion again (in which case we need to be careful of
4708 * the error return reporting that we have already inserted
4709 * this index).
4710 */
4711 ret = radix_tree_insert(&iter->radix, idx, sg);
4712 if (ret && ret != -EEXIST)
4713 goto scan;
4714
4715 exception =
4716 RADIX_TREE_EXCEPTIONAL_ENTRY |
4717 idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
4718 for (i = 1; i < count; i++) {
4719 ret = radix_tree_insert(&iter->radix, idx + i,
4720 (void *)exception);
4721 if (ret && ret != -EEXIST)
4722 goto scan;
4723 }
4724
4725 idx += count;
4726 sg = ____sg_next(sg);
4727 count = __sg_page_count(sg);
4728 }
4729
4730scan:
4731 iter->sg_pos = sg;
4732 iter->sg_idx = idx;
4733
4734 mutex_unlock(&iter->lock);
4735
4736 if (unlikely(n < idx)) /* insertion completed by another thread */
4737 goto lookup;
4738
4739 /* In case we failed to insert the entry into the radixtree, we need
4740 * to look beyond the current sg.
4741 */
4742 while (idx + count <= n) {
4743 idx += count;
4744 sg = ____sg_next(sg);
4745 count = __sg_page_count(sg);
4746 }
4747
4748 *offset = n - idx;
4749 return sg;
4750
4751lookup:
4752 rcu_read_lock();
4753
4754 sg = radix_tree_lookup(&iter->radix, n);
4755 GEM_BUG_ON(!sg);
4756
4757	/* If this index is in the middle of a multi-page sg entry,
4758 * the radixtree will contain an exceptional entry that points
4759 * to the start of that range. We will return the pointer to
4760 * the base page and the offset of this page within the
4761 * sg entry's range.
4762 */
4763 *offset = 0;
4764 if (unlikely(radix_tree_exception(sg))) {
4765 unsigned long base =
4766 (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
4767
4768 sg = radix_tree_lookup(&iter->radix, base);
4769 GEM_BUG_ON(!sg);
4770
4771 *offset = n - base;
4772 }
4773
4774 rcu_read_unlock();
4775
4776 return sg;
4777}
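
/*
 * Worked example of the radixtree layout above (illustrative numbers): for a
 * single sg entry covering page indices [4, 8), index 4 stores the sg pointer
 * itself while indices 5-7 each store the exceptional entry
 * RADIX_TREE_EXCEPTIONAL_ENTRY | (4 << RADIX_TREE_EXCEPTIONAL_SHIFT); looking
 * up n = 6 then recovers base = 4 and returns *offset = 2 into that entry.
 */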
4778
4779struct page *
4780i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
4781{
4782 struct scatterlist *sg;
4783 unsigned int offset;
4784
4785 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
4786
4787 sg = i915_gem_object_get_sg(obj, n, &offset);
4788 return nth_page(sg_page(sg), offset);
4789}
4790
4791/* Like i915_gem_object_get_page(), but mark the returned page dirty */
4792struct page *
4793i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
4794 unsigned int n)
4795{
4796 struct page *page;
4797
4798 page = i915_gem_object_get_page(obj, n);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004799 if (!obj->mm.dirty)
Chris Wilson96d77632016-10-28 13:58:33 +01004800 set_page_dirty(page);
4801
4802 return page;
4803}
4804
4805dma_addr_t
4806i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
4807 unsigned long n)
4808{
4809 struct scatterlist *sg;
4810 unsigned int offset;
4811
4812 sg = i915_gem_object_get_sg(obj, n, &offset);
4813 return sg_dma_address(sg) + (offset << PAGE_SHIFT);
4814}