/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

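/* A CPU access is coherent with GPU-visible memory either when the
 * platform shares its last-level cache with the GPU (LLC), or when the
 * object is kept in a snooped cache level; only uncached objects on
 * non-LLC parts require explicit clflushes.
 */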
static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
{
        return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
                return false;

        if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return true;

        return obj->pin_display;
}

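/* Reserve a temporary node in the mappable range of the global GTT,
 * giving the GTT pread/pwrite slowpaths an aperture window to copy
 * through when an object cannot be pinned as a whole.
 */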
static int
insert_mappable_node(struct i915_ggtt *ggtt,
                     struct drm_mm_node *node, u32 size)
{
        memset(node, 0, sizeof(*node));
        return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
                                                   size, 0,
                                                   I915_COLOR_UNEVICTABLE,
                                                   0, ggtt->mappable_end,
                                                   DRM_MM_SEARCH_DEFAULT,
                                                   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
        drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  u64 size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     u64 size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
        int ret;

        might_sleep();

        if (!i915_reset_in_progress(error))
                return 0;

        /*
         * Only wait 10 seconds for the gpu reset to complete to avoid hanging
         * userspace. If it takes that long, something really bad is going on
         * and we should simply try to bail out and fail as gracefully as
         * possible.
         */
        ret = wait_event_interruptible_timeout(error->reset_queue,
                                               !i915_reset_in_progress(error),
                                               I915_RESET_TIMEOUT);
        if (ret == 0) {
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
                return -EIO;
        } else if (ret < 0) {
                return ret;
        } else {
                return 0;
        }
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;

        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        size_t pinned;

        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
        list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);

        args->aper_size = ggtt->base.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}

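/* Snapshot the object's shmem pages into one contiguous DMA allocation
 * and describe it with a single-entry sg_table; "phys" objects trade
 * flexibility for a physically contiguous backing store.
 */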
static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = obj->base.filp->f_mapping;
        drm_dma_handle_t *phys;
        struct sg_table *st;
        struct scatterlist *sg;
        char *vaddr;
        int i;

        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return ERR_PTR(-EINVAL);

        /* Always aligning to the object size allows a single allocation
         * to handle all possible callers, and given typical object sizes,
         * the alignment of the buddy allocation will naturally match.
         */
        phys = drm_pci_alloc(obj->base.dev,
                             obj->base.size,
                             roundup_pow_of_two(obj->base.size));
        if (!phys)
                return ERR_PTR(-ENOMEM);

        vaddr = phys->vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                char *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page)) {
                        st = ERR_CAST(page);
                        goto err_phys;
                }

                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
                kunmap_atomic(src);

                put_page(page);
                vaddr += PAGE_SIZE;
        }

        i915_gem_chipset_flush(to_i915(obj->base.dev));

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st) {
                st = ERR_PTR(-ENOMEM);
                goto err_phys;
        }

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                st = ERR_PTR(-ENOMEM);
                goto err_phys;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;

        sg_dma_address(sg) = phys->busaddr;
        sg_dma_len(sg) = obj->base.size;

        obj->phys_handle = phys;
        return st;

err_phys:
        drm_pci_free(obj->base.dev, phys);
        return st;
}

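/* Common teardown before pages are returned to the backing store:
 * discard the dirty state of DONTNEED objects, clflush the pages if
 * they are not cache coherent, and move the object back to the CPU
 * domain.
 */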
static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                struct sg_table *pages,
                                bool needs_clflush)
{
        GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

        if (obj->mm.madv == I915_MADV_DONTNEED)
                obj->mm.dirty = false;

        if (needs_clflush &&
            (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
            !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                drm_clflush_sg(pages);

        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
        __i915_gem_object_release_shmem(obj, pages, false);

        if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;

                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
                        char *dst;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;

                        dst = kmap_atomic(page);
                        drm_clflush_virt_range(vaddr, PAGE_SIZE);
                        memcpy(dst, vaddr, PAGE_SIZE);
                        kunmap_atomic(dst);

                        set_page_dirty(page);
                        if (obj->mm.madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        put_page(page);
                        vaddr += PAGE_SIZE;
                }
                obj->mm.dirty = false;
        }

        sg_free_table(pages);
        kfree(pages);

        drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,
        .release = i915_gem_object_release_phys,
};

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        LIST_HEAD(still_in_list);
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        /* Closed vmas are removed from the obj->vma_list - but they may
         * still have an active binding on the object. To remove those we
         * must wait for all rendering on the object to complete (as
         * unbinding must anyway), and retire the requests.
         */
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT,
                                   NULL);
        if (ret)
                return ret;

        i915_gem_retire_requests(to_i915(obj->base.dev));

        while ((vma = list_first_entry_or_null(&obj->vma_list,
                                               struct i915_vma,
                                               obj_link))) {
                list_move_tail(&vma->obj_link, &still_in_list);
                ret = i915_vma_unbind(vma);
                if (ret)
                        break;
        }
        list_splice(&still_in_list, &obj->vma_list);

        return ret;
}

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
                           unsigned int flags,
                           long timeout,
                           struct intel_rps_client *rps)
{
        struct drm_i915_gem_request *rq;

        BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return timeout;

        if (!dma_fence_is_i915(fence))
                return dma_fence_wait_timeout(fence,
                                              flags & I915_WAIT_INTERRUPTIBLE,
                                              timeout);

        rq = to_request(fence);
        if (i915_gem_request_completed(rq))
                goto out;

        /* This client is about to stall waiting for the GPU. In many cases
         * this is undesirable and limits the throughput of the system, as
         * many clients cannot continue processing user input/output whilst
         * blocked. RPS autotuning may take tens of milliseconds to respond
         * to the GPU load and thus incurs additional latency for the client.
         * We can circumvent that by promoting the GPU frequency to maximum
         * before we wait. This makes the GPU throttle up much more quickly
         * (good for benchmarks and user experience, e.g. window animations),
         * but at a cost of spending more power processing the workload
         * (bad for battery). Not all clients even want their results
         * immediately and for them we should just let the GPU select its own
         * frequency to maximise efficiency. To prevent a single client from
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
        if (rps) {
                if (INTEL_GEN(rq->i915) >= 6)
                        gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
                else
                        rps = NULL;
        }

        timeout = i915_wait_request(rq, flags, timeout);

out:
        if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
                i915_gem_request_retire_upto(rq);

        if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
                /* The GPU is now idle and this client has stalled.
                 * Since no other client has submitted a request in the
                 * meantime, assume that this client is the only one
                 * supplying work to the GPU but is unable to keep that
                 * work supplied because it is waiting. Since the GPU is
                 * then never kept fully busy, RPS autoclocking will
                 * keep the clocks relatively low, causing further delays.
                 * Compensate by giving the synchronous client credit for
                 * a waitboost next time.
                 */
                spin_lock(&rq->i915->rps.client_lock);
                list_del_init(&rps->link);
                spin_unlock(&rq->i915->rps.client_lock);
        }

        return timeout;
}

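/* Wait upon the reservation object: with I915_WAIT_ALL every shared
 * fence (reader) as well as the exclusive fence (writer) must signal;
 * otherwise waiting upon the exclusive fence alone suffices.
 */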
static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
                                 unsigned int flags,
                                 long timeout,
                                 struct intel_rps_client *rps)
{
        struct dma_fence *excl;

        if (flags & I915_WAIT_ALL) {
                struct dma_fence **shared;
                unsigned int count, i;
                int ret;

                ret = reservation_object_get_fences_rcu(resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        timeout = i915_gem_object_wait_fence(shared[i],
                                                             flags, timeout,
                                                             rps);
                        if (timeout <= 0)
                                break;

                        dma_fence_put(shared[i]);
                }

                for (; i < count; i++)
                        dma_fence_put(shared[i]);
                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(resv);
        }

        if (excl && timeout > 0)
                timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);

        dma_fence_put(excl);

        return timeout;
}

static void __fence_set_priority(struct dma_fence *fence, int prio)
{
        struct drm_i915_gem_request *rq;
        struct intel_engine_cs *engine;

        if (!dma_fence_is_i915(fence))
                return;

        rq = to_request(fence);
        engine = rq->engine;
        if (!engine->schedule)
                return;

        engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
        /* Recurse once into a fence-array */
        if (dma_fence_is_array(fence)) {
                struct dma_fence_array *array = to_dma_fence_array(fence);
                int i;

                for (i = 0; i < array->num_fences; i++)
                        __fence_set_priority(array->fences[i], prio);
        } else {
                __fence_set_priority(fence, prio);
        }
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                              unsigned int flags,
                              int prio)
{
        struct dma_fence *excl;

        if (flags & I915_WAIT_ALL) {
                struct dma_fence **shared;
                unsigned int count, i;
                int ret;

                ret = reservation_object_get_fences_rcu(obj->resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        fence_set_priority(shared[i], prio);
                        dma_fence_put(shared[i]);
                }

                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(obj->resv);
        }

        if (excl) {
                fence_set_priority(excl, prio);
                dma_fence_put(excl);
        }
        return 0;
}

/**
 * i915_gem_object_wait - wait for rendering on the object to complete
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
                     unsigned int flags,
                     long timeout,
                     struct intel_rps_client *rps)
{
        might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
        GEM_BUG_ON(debug_locks &&
                   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
                   !!(flags & I915_WAIT_LOCKED));
#endif
        GEM_BUG_ON(timeout < 0);

        timeout = i915_gem_object_wait_reservation(obj->resv,
                                                   flags, timeout,
                                                   rps);
        return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
        struct drm_i915_file_private *fpriv = file->driver_priv;

        return &fpriv->rps;
}

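/* Switch the object over to the phys backing store. Any existing GGTT
 * bindings and shmem pages are released first; the next call to
 * i915_gem_object_pin_pages() then copies the contents into a single
 * contiguous DMA allocation via i915_gem_object_get_pages_phys().
 */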
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
{
        int ret;

        if (align > obj->base.size)
                return -EINVAL;

        if (obj->ops == &i915_gem_phys_ops)
                return 0;

        if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;

        if (obj->base.filp == NULL)
                return -EINVAL;

        ret = i915_gem_object_unbind(obj);
        if (ret)
                return ret;

        __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
        if (obj->mm.pages)
                return -EBUSY;

        obj->ops = &i915_gem_phys_ops;

        return i915_gem_object_pin_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file)
{
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);

        /* We manually control the domain here and pretend that it
         * remains coherent, i.e. in the GTT domain, like shmem_pwrite.
         */
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;

        drm_clflush_virt_range(vaddr, args->size);
        i915_gem_chipset_flush(to_i915(obj->base.dev));

        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        return 0;
}

void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
        return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

        kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
                struct drm_i915_private *dev_priv,
                uint64_t size,
                uint32_t *handle_p)
{
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        /* Allocate the new object */
        obj = i915_gem_object_create(dev_priv, size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        *handle_p = handle;
        return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, to_i915(dev),
                               args->size, &args->handle);
}

/**
 * i915_gem_create_ioctl - create a new mm object and return a handle to it
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_create *args = data;

        i915_gem_flush_free_objects(dev_priv);

        return i915_gem_create(file, dev_priv,
                               args->size, &args->handle);
}

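/* On bit-17-swizzling machines the CPU view of a tiled page has
 * adjacent 64-byte cachelines swapped whenever bit 17 of the page's
 * physical address is set. Copying one cacheline at a time with bit 6
 * of the offset flipped (gpu_offset ^ 64) undoes that swizzle.
 */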
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
                        int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_to_user(cpu_vaddr + cpu_offset,
                                     gpu_vaddr + swizzled_gpu_offset,
                                     this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
                          const char __user *cpu_vaddr,
                          int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
                                       cpu_vaddr + cpu_offset,
                                       this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
                                    unsigned int *needs_clflush)
{
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        *needs_clflush = 0;
        if (!i915_gem_object_has_struct_page(obj))
                return -ENODEV;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED,
                                   MAX_SCHEDULE_TIMEOUT,
                                   NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_flush_gtt_write_domain(obj);

        /* If we're not in the cpu read domain, set ourselves into the gtt
         * read domain and manually flush cachelines (if required). This
         * optimizes for the case when the gpu will dirty the data
         * anyway again before the next pread happens.
         */
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
                *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
                                                        obj->cache_level);

        if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
                ret = i915_gem_object_set_to_cpu_domain(obj, false);
                if (ret)
                        goto err_unpin;

                *needs_clflush = 0;
        }

        /* return with the pages pinned */
        return 0;

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
                                     unsigned int *needs_clflush)
{
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        *needs_clflush = 0;
        if (!i915_gem_object_has_struct_page(obj))
                return -ENODEV;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT,
                                   NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_flush_gtt_write_domain(obj);

        /* If we're not in the cpu write domain, set ourselves into the
         * gtt write domain and manually flush cachelines (as required).
         * This optimizes for the case when the gpu will use the data
         * right away and we therefore have to clflush anyway.
         */
        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
                *needs_clflush |= cpu_write_needs_clflush(obj) << 1;

        /* Same trick applies to invalidate partially written cachelines read
         * before writing.
         */
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
                *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
                                                         obj->cache_level);

        if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
                ret = i915_gem_object_set_to_cpu_domain(obj, true);
                if (ret)
                        goto err_unpin;

                *needs_clflush = 0;
        }

        if ((*needs_clflush & CLFLUSH_AFTER) == 0)
                obj->cache_dirty = true;

        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
        obj->mm.dirty = true;
        /* return with the pages pinned */
        return 0;

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
                             bool swizzled)
{
        if (unlikely(swizzled)) {
                unsigned long start = (unsigned long) addr;
                unsigned long end = (unsigned long) addr + length;

                /* For swizzling simply ensure that we always flush both
                 * channels. Lame, but simple and it works. Swizzled
                 * pwrite/pread is far from a hotpath - current userspace
                 * doesn't use it at all.
                 */
                start = round_down(start, 128);
                end = round_up(end, 128);

                drm_clflush_virt_range((void *)start, end - start);
        } else {
                drm_clflush_virt_range(addr, length);
        }
}

/* The only difference from the fast-path function is that this can handle
 * bit17 and uses non-atomic copy and kmap functions.
 */
static int
shmem_pread_slow(struct page *page, int offset, int length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (needs_clflush)
                shmem_clflush_swizzled_range(vaddr + offset, length,
                                             page_do_bit17_swizzling);

        if (page_do_bit17_swizzling)
                ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
        else
                ret = __copy_to_user(user_data, vaddr + offset, length);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
            bool page_do_bit17_swizzling, bool needs_clflush)
{
        int ret;

        ret = -ENODEV;
        if (!page_do_bit17_swizzling) {
                char *vaddr = kmap_atomic(page);

                if (needs_clflush)
                        drm_clflush_virt_range(vaddr + offset, length);
                ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
                kunmap_atomic(vaddr);
        }
        if (ret == 0)
                return 0;

        return shmem_pread_slow(page, offset, length, user_data,
                                page_do_bit17_swizzling, needs_clflush);
}

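/* Copy the object contents out to userspace one page at a time. Only
 * the preparation (wait, pin and domain management) runs under
 * struct_mutex; the copies themselves proceed with the lock dropped.
 */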
|  | 925 | static int | 
|  | 926 | i915_gem_shmem_pread(struct drm_i915_gem_object *obj, | 
|  | 927 | struct drm_i915_gem_pread *args) | 
|  | 928 | { | 
|  | 929 | char __user *user_data; | 
|  | 930 | u64 remain; | 
|  | 931 | unsigned int obj_do_bit17_swizzling; | 
|  | 932 | unsigned int needs_clflush; | 
|  | 933 | unsigned int idx, offset; | 
|  | 934 | int ret; | 
|  | 935 |  | 
|  | 936 | obj_do_bit17_swizzling = 0; | 
|  | 937 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 
|  | 938 | obj_do_bit17_swizzling = BIT(17); | 
|  | 939 |  | 
|  | 940 | ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex); | 
|  | 941 | if (ret) | 
|  | 942 | return ret; | 
|  | 943 |  | 
|  | 944 | ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); | 
|  | 945 | mutex_unlock(&obj->base.dev->struct_mutex); | 
|  | 946 | if (ret) | 
|  | 947 | return ret; | 
|  | 948 |  | 
|  | 949 | remain = args->size; | 
|  | 950 | user_data = u64_to_user_ptr(args->data_ptr); | 
|  | 951 | offset = offset_in_page(args->offset); | 
|  | 952 | for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { | 
|  | 953 | struct page *page = i915_gem_object_get_page(obj, idx); | 
|  | 954 | int length; | 
|  | 955 |  | 
|  | 956 | length = remain; | 
|  | 957 | if (offset + length > PAGE_SIZE) | 
|  | 958 | length = PAGE_SIZE - offset; | 
|  | 959 |  | 
|  | 960 | ret = shmem_pread(page, offset, length, user_data, | 
|  | 961 | page_to_phys(page) & obj_do_bit17_swizzling, | 
|  | 962 | needs_clflush); | 
|  | 963 | if (ret) | 
|  | 964 | break; | 
|  | 965 |  | 
|  | 966 | remain -= length; | 
|  | 967 | user_data += length; | 
|  | 968 | offset = 0; | 
|  | 969 | } | 
|  | 970 |  | 
|  | 971 | i915_gem_obj_finish_shmem_access(obj); | 
|  | 972 | return ret; | 
|  | 973 | } | 
|  | 974 |  | 
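The loop above is a standard page walk: only the first iteration may start mid-page, and every later iteration starts at offset 0. The same arithmetic as a standalone sketch (illustrative, 4 KiB pages assumed):

	#include <stdio.h>

	#define SHIFT 12
	#define SIZE  (1ULL << SHIFT)

	int main(void)
	{
		unsigned long long offset = 0x1801, remain = 0x1000; /* sample */
		unsigned long long idx = offset >> SHIFT;
		unsigned int off = offset & (SIZE - 1);

		for (; remain; idx++) {
			unsigned long long length = remain;

			if (off + length > SIZE)
				length = SIZE - off;
			printf("page %llu: offset %u, length %llu\n",
			       idx, off, length);
			remain -= length;
			off = 0; /* only the first page starts mid-page */
		}
		return 0;
	}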
|  | 975 | static inline bool | 
|  | 976 | gtt_user_read(struct io_mapping *mapping, | 
|  | 977 | loff_t base, int offset, | 
|  | 978 | char __user *user_data, int length) | 
|  | 979 | { | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 980 | void *vaddr; | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 981 | unsigned long unwritten; | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 982 |  | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 983 | /* We can use the plain CPU copy functions because this is x86. */ | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 984 | vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); | 
|  | 985 | unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length); | 
|  | 986 | io_mapping_unmap_atomic(vaddr); | 
|  | 987 | if (unwritten) { | 
|  | 988 | vaddr = (void __force *) | 
|  | 989 | io_mapping_map_wc(mapping, base, PAGE_SIZE); | 
|  | 990 | unwritten = copy_to_user(user_data, vaddr + offset, length); | 
|  | 991 | io_mapping_unmap(vaddr); | 
|  | 992 | } | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 993 | return unwritten; | 
|  | 994 | } | 
|  | 995 |  | 
|  | 996 | static int | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 997 | i915_gem_gtt_pread(struct drm_i915_gem_object *obj, | 
|  | 998 | const struct drm_i915_gem_pread *args) | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 999 | { | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1000 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | 
|  | 1001 | struct i915_ggtt *ggtt = &i915->ggtt; | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1002 | struct drm_mm_node node; | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1003 | struct i915_vma *vma; | 
|  | 1004 | void __user *user_data; | 
|  | 1005 | u64 remain, offset; | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1006 | int ret; | 
|  | 1007 |  | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1008 | ret = mutex_lock_interruptible(&i915->drm.struct_mutex); | 
|  | 1009 | if (ret) | 
|  | 1010 | return ret; | 
|  | 1011 |  | 
|  | 1012 | intel_runtime_pm_get(i915); | 
|  | 1013 | vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, | 
|  | 1014 | PIN_MAPPABLE | PIN_NONBLOCK); | 
| Chris Wilson | 1803458 | 2016-08-18 17:16:45 +0100 | [diff] [blame] | 1015 | if (!IS_ERR(vma)) { | 
|  | 1016 | node.start = i915_ggtt_offset(vma); | 
|  | 1017 | node.allocated = false; | 
| Chris Wilson | 49ef529 | 2016-08-18 17:17:00 +0100 | [diff] [blame] | 1018 | ret = i915_vma_put_fence(vma); | 
| Chris Wilson | 1803458 | 2016-08-18 17:16:45 +0100 | [diff] [blame] | 1019 | if (ret) { | 
|  | 1020 | i915_vma_unpin(vma); | 
|  | 1021 | vma = ERR_PTR(ret); | 
|  | 1022 | } | 
|  | 1023 | } | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1024 | if (IS_ERR(vma)) { | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1025 | ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1026 | if (ret) | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1027 | goto out_unlock; | 
|  | 1028 | GEM_BUG_ON(!node.allocated); | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1029 | } | 
|  | 1030 |  | 
|  | 1031 | ret = i915_gem_object_set_to_gtt_domain(obj, false); | 
|  | 1032 | if (ret) | 
|  | 1033 | goto out_unpin; | 
|  | 1034 |  | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1035 | mutex_unlock(&i915->drm.struct_mutex); | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1036 |  | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1037 | user_data = u64_to_user_ptr(args->data_ptr); | 
|  | 1038 | remain = args->size; | 
|  | 1039 | offset = args->offset; | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1040 |  | 
|  | 1041 | while (remain > 0) { | 
|  | 1042 | /* Operation in this page | 
|  | 1043 | * | 
|  | 1044 | * page_base = page offset within aperture | 
|  | 1045 | * page_offset = offset within page | 
|  | 1046 | * page_length = bytes to copy for this page | 
|  | 1047 | */ | 
|  | 1048 | u32 page_base = node.start; | 
|  | 1049 | unsigned page_offset = offset_in_page(offset); | 
|  | 1050 | unsigned page_length = PAGE_SIZE - page_offset; | 
|  | 1051 | page_length = remain < page_length ? remain : page_length; | 
|  | 1052 | if (node.allocated) { | 
|  | 1053 | wmb(); /* flush pending CPU writes before we update the GGTT */ | 
|  | 1054 | ggtt->base.insert_page(&ggtt->base, | 
|  | 1055 | i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1056 | node.start, I915_CACHE_NONE, 0); | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1057 | wmb(); /* flush the GGTT update (insert_page) before we read */ | 
|  | 1058 | } else { | 
|  | 1059 | page_base += offset & PAGE_MASK; | 
|  | 1060 | } | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1061 |  | 
|  | 1062 | if (gtt_user_read(&ggtt->mappable, page_base, page_offset, | 
|  | 1063 | user_data, page_length)) { | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1064 | ret = -EFAULT; | 
|  | 1065 | break; | 
|  | 1066 | } | 
|  | 1067 |  | 
|  | 1068 | remain -= page_length; | 
|  | 1069 | user_data += page_length; | 
|  | 1070 | offset += page_length; | 
|  | 1071 | } | 
|  | 1072 |  | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1073 | mutex_lock(&i915->drm.struct_mutex); | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1074 | out_unpin: | 
|  | 1075 | if (node.allocated) { | 
|  | 1076 | wmb(); | 
|  | 1077 | ggtt->base.clear_range(&ggtt->base, | 
| Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 1078 | node.start, node.size); | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1079 | remove_mappable_node(&node); | 
|  | 1080 | } else { | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1081 | i915_vma_unpin(vma); | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1082 | } | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1083 | out_unlock: | 
|  | 1084 | intel_runtime_pm_put(i915); | 
|  | 1085 | mutex_unlock(&i915->drm.struct_mutex); | 
| Chris Wilson | f60d7f0 | 2012-09-04 21:02:56 +0100 | [diff] [blame] | 1086 |  | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 1087 | return ret; | 
|  | 1088 | } | 
|  | 1089 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1090 | /** | 
|  | 1091 | * Reads data from the object referenced by handle. | 
| Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 1092 | * @dev: drm device pointer | 
|  | 1093 | * @data: ioctl data blob | 
|  | 1094 | * @file: drm file pointer | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1095 | * | 
|  | 1096 | * On error, the contents of *data are undefined. | 
|  | 1097 | */ | 
|  | 1098 | int | 
|  | 1099 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1100 | struct drm_file *file) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1101 | { | 
|  | 1102 | struct drm_i915_gem_pread *args = data; | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1103 | struct drm_i915_gem_object *obj; | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1104 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1105 |  | 
| Chris Wilson | 51311d0 | 2010-11-17 09:10:42 +0000 | [diff] [blame] | 1106 | if (args->size == 0) | 
|  | 1107 | return 0; | 
|  | 1108 |  | 
|  | 1109 | if (!access_ok(VERIFY_WRITE, | 
| Gustavo Padovan | 3ed605b | 2016-04-26 12:32:27 -0300 | [diff] [blame] | 1110 | u64_to_user_ptr(args->data_ptr), | 
| Chris Wilson | 51311d0 | 2010-11-17 09:10:42 +0000 | [diff] [blame] | 1111 | args->size)) | 
|  | 1112 | return -EFAULT; | 
|  | 1113 |  | 
| Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1114 | obj = i915_gem_object_lookup(file, args->handle); | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1115 | if (!obj) | 
|  | 1116 | return -ENOENT; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1117 |  | 
| Chris Wilson | 7dcd249 | 2010-09-26 20:21:44 +0100 | [diff] [blame] | 1118 | /* Bounds check source.  */ | 
| Matthew Auld | 966d5bf | 2016-12-13 20:32:22 +0000 | [diff] [blame] | 1119 | if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { | 
| Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 1120 | ret = -EINVAL; | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1121 | goto out; | 
| Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 1122 | } | 
|  | 1123 |  | 
| Chris Wilson | db53a30 | 2011-02-03 11:57:46 +0000 | [diff] [blame] | 1124 | trace_i915_gem_object_pread(obj, args->offset, args->size); | 
|  | 1125 |  | 
| Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1126 | ret = i915_gem_object_wait(obj, | 
|  | 1127 | I915_WAIT_INTERRUPTIBLE, | 
|  | 1128 | MAX_SCHEDULE_TIMEOUT, | 
|  | 1129 | to_rps_client(file)); | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1130 | if (ret) | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1131 | goto out; | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1132 |  | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1133 | ret = i915_gem_object_pin_pages(obj); | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1134 | if (ret) | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1135 | goto out; | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1136 |  | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1137 | ret = i915_gem_shmem_pread(obj, args); | 
| Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1138 | if (ret == -EFAULT || ret == -ENODEV) | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1139 | ret = i915_gem_gtt_pread(obj, args); | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1140 |  | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1141 | i915_gem_object_unpin_pages(obj); | 
|  | 1142 | out: | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1143 | i915_gem_object_put(obj); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 1144 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1145 | } | 
|  | 1146 |  | 
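From userspace this path is reached via DRM_IOCTL_I915_GEM_PREAD. A minimal caller sketch (error handling elided; the wrapper name is ours):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int gem_pread(int fd, uint32_t handle, uint64_t offset,
			     void *dst, uint64_t size)
	{
		struct drm_i915_gem_pread arg;

		memset(&arg, 0, sizeof(arg));
		arg.handle = handle;           /* GEM handle for this fd */
		arg.offset = offset;           /* byte offset into the object */
		arg.size = size;               /* bytes to copy out */
		arg.data_ptr = (uintptr_t)dst; /* destination in our memory */

		return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &arg); /* 0 on success */
	}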
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 1147 | /* This is the fast write path which cannot handle | 
|  | 1148 | * page faults in the source data | 
| Linus Torvalds | 9b7530cc | 2008-10-20 14:16:43 -0700 | [diff] [blame] | 1149 | */ | 
| Linus Torvalds | 9b7530cc | 2008-10-20 14:16:43 -0700 | [diff] [blame] | 1150 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1151 | static inline bool | 
|  | 1152 | ggtt_write(struct io_mapping *mapping, | 
|  | 1153 | loff_t base, int offset, | 
|  | 1154 | char __user *user_data, int length) | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 1155 | { | 
| Ben Widawsky | 4f0c7cf | 2012-04-16 14:07:47 -0700 | [diff] [blame] | 1156 | void *vaddr; | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 1157 | unsigned long unwritten; | 
|  | 1158 |  | 
| Ben Widawsky | 4f0c7cf | 2012-04-16 14:07:47 -0700 | [diff] [blame] | 1159 | /* We can use the plain CPU copy functions because this is x86. */ | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1160 | vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); | 
|  | 1161 | unwritten = __copy_from_user_inatomic_nocache(vaddr + offset, | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 1162 | user_data, length); | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1163 | io_mapping_unmap_atomic(vaddr); | 
|  | 1164 | if (unwritten) { | 
|  | 1165 | vaddr = (void __force *) | 
|  | 1166 | io_mapping_map_wc(mapping, base, PAGE_SIZE); | 
|  | 1167 | unwritten = copy_from_user(vaddr + offset, user_data, length); | 
|  | 1168 | io_mapping_unmap(vaddr); | 
|  | 1169 | } | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 1170 |  | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1171 | return unwritten; | 
|  | 1172 | } | 
|  | 1173 |  | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 1174 | /** | 
|  | 1175 | * This is the fast pwrite path, where we copy the data directly from the | 
|  | 1176 | * user into the GTT, uncached. | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1177 | * @obj: i915 GEM object | 
| Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 1178 | * @args: pwrite arguments structure | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 1179 | */ | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1180 | static int | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1181 | i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, | 
|  | 1182 | const struct drm_i915_gem_pwrite *args) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1183 | { | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1184 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | 
| Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 1185 | struct i915_ggtt *ggtt = &i915->ggtt; | 
|  | 1186 | struct drm_mm_node node; | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1187 | struct i915_vma *vma; | 
|  | 1188 | u64 remain, offset; | 
|  | 1189 | void __user *user_data; | 
| Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 1190 | int ret; | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1191 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1192 | ret = mutex_lock_interruptible(&i915->drm.struct_mutex); | 
|  | 1193 | if (ret) | 
|  | 1194 | return ret; | 
| Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 1195 |  | 
| Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1196 | intel_runtime_pm_get(i915); | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1197 | vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, | 
| Chris Wilson | de89508 | 2016-08-04 16:32:34 +0100 | [diff] [blame] | 1198 | PIN_MAPPABLE | PIN_NONBLOCK); | 
| Chris Wilson | 1803458 | 2016-08-18 17:16:45 +0100 | [diff] [blame] | 1199 | if (!IS_ERR(vma)) { | 
|  | 1200 | node.start = i915_ggtt_offset(vma); | 
|  | 1201 | node.allocated = false; | 
| Chris Wilson | 49ef529 | 2016-08-18 17:17:00 +0100 | [diff] [blame] | 1202 | ret = i915_vma_put_fence(vma); | 
| Chris Wilson | 1803458 | 2016-08-18 17:16:45 +0100 | [diff] [blame] | 1203 | if (ret) { | 
|  | 1204 | i915_vma_unpin(vma); | 
|  | 1205 | vma = ERR_PTR(ret); | 
|  | 1206 | } | 
|  | 1207 | } | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1208 | if (IS_ERR(vma)) { | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1209 | ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); | 
| Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 1210 | if (ret) | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1211 | goto out_unlock; | 
|  | 1212 | GEM_BUG_ON(!node.allocated); | 
| Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 1213 | } | 
| Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 1214 |  | 
|  | 1215 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | 
|  | 1216 | if (ret) | 
|  | 1217 | goto out_unpin; | 
|  | 1218 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1219 | mutex_unlock(&i915->drm.struct_mutex); | 
|  | 1220 |  | 
| Chris Wilson | b19482d | 2016-08-18 17:16:43 +0100 | [diff] [blame] | 1221 | intel_fb_obj_invalidate(obj, ORIGIN_CPU); | 
| Paulo Zanoni | 063e4e6 | 2015-02-13 17:23:45 -0200 | [diff] [blame] | 1222 |  | 
| Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 1223 | user_data = u64_to_user_ptr(args->data_ptr); | 
|  | 1224 | offset = args->offset; | 
|  | 1225 | remain = args->size; | 
|  | 1226 | while (remain) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1227 | /* Operation in this page | 
|  | 1228 | * | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 1229 | * page_base = page offset within aperture | 
|  | 1230 | * page_offset = offset within page | 
|  | 1231 | * page_length = bytes to copy for this page | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1232 | */ | 
| Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 1233 | u32 page_base = node.start; | 
| Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 1234 | unsigned int page_offset = offset_in_page(offset); | 
|  | 1235 | unsigned int page_length = PAGE_SIZE - page_offset; | 
| Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 1236 | page_length = remain < page_length ? remain : page_length; | 
|  | 1237 | if (node.allocated) { | 
|  | 1238 | wmb(); /* flush the write before we modify the GGTT */ | 
|  | 1239 | ggtt->base.insert_page(&ggtt->base, | 
|  | 1240 | i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), | 
|  | 1241 | node.start, I915_CACHE_NONE, 0); | 
|  | 1242 | wmb(); /* flush modifications to the GGTT (insert_page) */ | 
|  | 1243 | } else { | 
|  | 1244 | page_base += offset & PAGE_MASK; | 
|  | 1245 | } | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 1246 | /* If we get a fault while copying data, then (presumably) our | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 1247 | * source page isn't available.  Return the error and we'll | 
|  | 1248 | * retry in the slow path. | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1249 | * If the object is non-shmem backed, we retry again with the | 
|  | 1250 | * path that handles page fault. | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 1251 | */ | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1252 | if (ggtt_write(&ggtt->mappable, page_base, page_offset, | 
|  | 1253 | user_data, page_length)) { | 
|  | 1254 | ret = -EFAULT; | 
|  | 1255 | break; | 
| Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 1256 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1257 |  | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 1258 | remain -= page_length; | 
|  | 1259 | user_data += page_length; | 
|  | 1260 | offset += page_length; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1261 | } | 
| Chris Wilson | b19482d | 2016-08-18 17:16:43 +0100 | [diff] [blame] | 1262 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1263 |  | 
|  | 1264 | mutex_lock(&i915->drm.struct_mutex); | 
| Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 1265 | out_unpin: | 
| Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 1266 | if (node.allocated) { | 
|  | 1267 | wmb(); | 
|  | 1268 | ggtt->base.clear_range(&ggtt->base, | 
| Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 1269 | node.start, node.size); | 
| Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 1270 | remove_mappable_node(&node); | 
|  | 1271 | } else { | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1272 | i915_vma_unpin(vma); | 
| Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 1273 | } | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1274 | out_unlock: | 
| Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1275 | intel_runtime_pm_put(i915); | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1276 | mutex_unlock(&i915->drm.struct_mutex); | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 1277 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1278 | } | 
|  | 1279 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1280 | static int | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1281 | shmem_pwrite_slow(struct page *page, int offset, int length, | 
| Daniel Vetter | d174bd6 | 2012-03-25 19:47:40 +0200 | [diff] [blame] | 1282 | char __user *user_data, | 
|  | 1283 | bool page_do_bit17_swizzling, | 
|  | 1284 | bool needs_clflush_before, | 
|  | 1285 | bool needs_clflush_after) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1286 | { | 
| Daniel Vetter | d174bd6 | 2012-03-25 19:47:40 +0200 | [diff] [blame] | 1287 | char *vaddr; | 
|  | 1288 | int ret; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1289 |  | 
| Daniel Vetter | d174bd6 | 2012-03-25 19:47:40 +0200 | [diff] [blame] | 1290 | vaddr = kmap(page); | 
| Daniel Vetter | e7e58eb | 2012-03-25 19:47:43 +0200 | [diff] [blame] | 1291 | if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1292 | shmem_clflush_swizzled_range(vaddr + offset, length, | 
| Daniel Vetter | 23c18c7 | 2012-03-25 19:47:42 +0200 | [diff] [blame] | 1293 | page_do_bit17_swizzling); | 
| Daniel Vetter | d174bd6 | 2012-03-25 19:47:40 +0200 | [diff] [blame] | 1294 | if (page_do_bit17_swizzling) | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1295 | ret = __copy_from_user_swizzled(vaddr, offset, user_data, | 
|  | 1296 | length); | 
| Daniel Vetter | d174bd6 | 2012-03-25 19:47:40 +0200 | [diff] [blame] | 1297 | else | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1298 | ret = __copy_from_user(vaddr + offset, user_data, length); | 
| Daniel Vetter | d174bd6 | 2012-03-25 19:47:40 +0200 | [diff] [blame] | 1299 | if (needs_clflush_after) | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1300 | shmem_clflush_swizzled_range(vaddr + offset, length, | 
| Daniel Vetter | 23c18c7 | 2012-03-25 19:47:42 +0200 | [diff] [blame] | 1301 | page_do_bit17_swizzling); | 
| Daniel Vetter | d174bd6 | 2012-03-25 19:47:40 +0200 | [diff] [blame] | 1302 | kunmap(page); | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 1303 |  | 
| Chris Wilson | 755d221 | 2012-09-04 21:02:55 +0100 | [diff] [blame] | 1304 | return ret ? -EFAULT : 0; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1305 | } | 
|  | 1306 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1307 | /* Per-page copy function for the shmem pwrite fastpath. | 
|  | 1308 | * Flushes invalid cachelines before writing to the target if | 
|  | 1309 | * needs_clflush_before is set and flushes out any written cachelines after | 
|  | 1310 | * writing if needs_clflush_after is set. | 
|  | 1311 | */ | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1312 | static int | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1313 | shmem_pwrite(struct page *page, int offset, int len, char __user *user_data, | 
|  | 1314 | bool page_do_bit17_swizzling, | 
|  | 1315 | bool needs_clflush_before, | 
|  | 1316 | bool needs_clflush_after) | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1317 | { | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1318 | int ret; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1319 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1320 | ret = -ENODEV; | 
|  | 1321 | if (!page_do_bit17_swizzling) { | 
|  | 1322 | char *vaddr = kmap_atomic(page); | 
|  | 1323 |  | 
|  | 1324 | if (needs_clflush_before) | 
|  | 1325 | drm_clflush_virt_range(vaddr + offset, len); | 
|  | 1326 | ret = __copy_from_user_inatomic(vaddr + offset, user_data, len); | 
|  | 1327 | if (needs_clflush_after) | 
|  | 1328 | drm_clflush_virt_range(vaddr + offset, len); | 
|  | 1329 |  | 
|  | 1330 | kunmap_atomic(vaddr); | 
|  | 1331 | } | 
|  | 1332 | if (ret == 0) | 
|  | 1333 | return ret; | 
|  | 1334 |  | 
|  | 1335 | return shmem_pwrite_slow(page, offset, len, user_data, | 
|  | 1336 | page_do_bit17_swizzling, | 
|  | 1337 | needs_clflush_before, | 
|  | 1338 | needs_clflush_after); | 
|  | 1339 | } | 
|  | 1340 |  | 
|  | 1341 | static int | 
|  | 1342 | i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, | 
|  | 1343 | const struct drm_i915_gem_pwrite *args) | 
|  | 1344 | { | 
|  | 1345 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | 
|  | 1346 | void __user *user_data; | 
|  | 1347 | u64 remain; | 
|  | 1348 | unsigned int obj_do_bit17_swizzling; | 
|  | 1349 | unsigned int partial_cacheline_write; | 
|  | 1350 | unsigned int needs_clflush; | 
|  | 1351 | unsigned int offset, idx; | 
|  | 1352 | int ret; | 
|  | 1353 |  | 
|  | 1354 | ret = mutex_lock_interruptible(&i915->drm.struct_mutex); | 
| Chris Wilson | 43394c7 | 2016-08-18 17:16:47 +0100 | [diff] [blame] | 1355 | if (ret) | 
|  | 1356 | return ret; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1357 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1358 | ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); | 
|  | 1359 | mutex_unlock(&i915->drm.struct_mutex); | 
|  | 1360 | if (ret) | 
|  | 1361 | return ret; | 
|  | 1362 |  | 
|  | 1363 | obj_do_bit17_swizzling = 0; | 
|  | 1364 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 
|  | 1365 | obj_do_bit17_swizzling = BIT(17); | 
|  | 1366 |  | 
|  | 1367 | /* If we don't overwrite a cacheline completely we need to be | 
|  | 1368 | * careful to have up-to-date data by first clflushing. Don't | 
|  | 1369 | * overcomplicate things and flush the entire write. | 
|  | 1370 | */ | 
|  | 1371 | partial_cacheline_write = 0; | 
|  | 1372 | if (needs_clflush & CLFLUSH_BEFORE) | 
|  | 1373 | partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1; | 
|  | 1374 |  | 
| Chris Wilson | 43394c7 | 2016-08-18 17:16:47 +0100 | [diff] [blame] | 1375 | user_data = u64_to_user_ptr(args->data_ptr); | 
| Chris Wilson | 43394c7 | 2016-08-18 17:16:47 +0100 | [diff] [blame] | 1376 | remain = args->size; | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1377 | offset = offset_in_page(args->offset); | 
|  | 1378 | for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { | 
|  | 1379 | struct page *page = i915_gem_object_get_page(obj, idx); | 
|  | 1380 | int length; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1381 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1382 | length = remain; | 
|  | 1383 | if (offset + length > PAGE_SIZE) | 
|  | 1384 | length = PAGE_SIZE - offset; | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 1385 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1386 | ret = shmem_pwrite(page, offset, length, user_data, | 
|  | 1387 | page_to_phys(page) & obj_do_bit17_swizzling, | 
|  | 1388 | (offset | length) & partial_cacheline_write, | 
|  | 1389 | needs_clflush & CLFLUSH_AFTER); | 
|  | 1390 | if (ret) | 
| Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 1391 | break; | 
|  | 1392 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1393 | remain -= length; | 
|  | 1394 | user_data += length; | 
|  | 1395 | offset = 0; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1396 | } | 
|  | 1397 |  | 
| Rodrigo Vivi | de152b6 | 2015-07-07 16:28:51 -0700 | [diff] [blame] | 1398 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1399 | i915_gem_obj_finish_shmem_access(obj); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1400 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1401 | } | 
|  | 1402 |  | 
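The (offset | length) & partial_cacheline_write test above is a compact check for whether either end of a write is cacheline-misaligned. A worked illustration, assuming 64-byte cachelines:

	#include <assert.h>

	int main(void)
	{
		const unsigned int mask = 64 - 1; /* x86_clflush_size - 1 */

		assert(((0u  | 128u) & mask) == 0); /* aligned: no pre-flush */
		assert(((32u | 100u) & mask) != 0); /* partial: clflush first */
		return 0;
	}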
|  | 1403 | /** | 
|  | 1404 | * Writes data to the object referenced by handle. | 
| Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 1405 | * @dev: drm device | 
|  | 1406 | * @data: ioctl data blob | 
|  | 1407 | * @file: drm file | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1408 | * | 
|  | 1409 | * On error, the contents of the buffer that were to be modified are undefined. | 
|  | 1410 | */ | 
|  | 1411 | int | 
|  | 1412 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1413 | struct drm_file *file) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1414 | { | 
|  | 1415 | struct drm_i915_gem_pwrite *args = data; | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1416 | struct drm_i915_gem_object *obj; | 
| Chris Wilson | 51311d0 | 2010-11-17 09:10:42 +0000 | [diff] [blame] | 1417 | int ret; | 
|  | 1418 |  | 
|  | 1419 | if (args->size == 0) | 
|  | 1420 | return 0; | 
|  | 1421 |  | 
|  | 1422 | if (!access_ok(VERIFY_READ, | 
| Gustavo Padovan | 3ed605b | 2016-04-26 12:32:27 -0300 | [diff] [blame] | 1423 | u64_to_user_ptr(args->data_ptr), | 
| Chris Wilson | 51311d0 | 2010-11-17 09:10:42 +0000 | [diff] [blame] | 1424 | args->size)) | 
|  | 1425 | return -EFAULT; | 
|  | 1426 |  | 
| Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1427 | obj = i915_gem_object_lookup(file, args->handle); | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1428 | if (!obj) | 
|  | 1429 | return -ENOENT; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1430 |  | 
| Chris Wilson | 7dcd249 | 2010-09-26 20:21:44 +0100 | [diff] [blame] | 1431 | /* Bounds check destination. */ | 
| Matthew Auld | 966d5bf | 2016-12-13 20:32:22 +0000 | [diff] [blame] | 1432 | if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { | 
| Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 1433 | ret = -EINVAL; | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1434 | goto err; | 
| Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 1435 | } | 
|  | 1436 |  | 
| Chris Wilson | db53a30 | 2011-02-03 11:57:46 +0000 | [diff] [blame] | 1437 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); | 
|  | 1438 |  | 
| Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1439 | ret = i915_gem_object_wait(obj, | 
|  | 1440 | I915_WAIT_INTERRUPTIBLE | | 
|  | 1441 | I915_WAIT_ALL, | 
|  | 1442 | MAX_SCHEDULE_TIMEOUT, | 
|  | 1443 | to_rps_client(file)); | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1444 | if (ret) | 
|  | 1445 | goto err; | 
|  | 1446 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1447 | ret = i915_gem_object_pin_pages(obj); | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1448 | if (ret) | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1449 | goto err; | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1450 |  | 
| Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 1451 | ret = -EFAULT; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1452 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | 
|  | 1453 | * it would end up going through the fenced access, and we'll get | 
|  | 1454 | * different detiling behavior between reading and writing. | 
|  | 1455 | * pread/pwrite currently are reading and writing from the CPU | 
|  | 1456 | * perspective, requiring manual detiling by the client. | 
|  | 1457 | */ | 
| Chris Wilson | 6eae005 | 2016-06-20 15:05:52 +0100 | [diff] [blame] | 1458 | if (!i915_gem_object_has_struct_page(obj) || | 
| Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1459 | cpu_write_needs_clflush(obj)) | 
| Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 1460 | /* Note that the gtt paths might fail with non-page-backed user | 
|  | 1461 | * pointers (e.g. gtt mappings when moving data between | 
| Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1462 | * textures). Fall back to the shmem path in that case. | 
|  | 1463 | */ | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1464 | ret = i915_gem_gtt_pwrite_fast(obj, args); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1465 |  | 
| Chris Wilson | d1054ee | 2016-07-16 18:42:36 +0100 | [diff] [blame] | 1466 | if (ret == -EFAULT || ret == -ENOSPC) { | 
| Chris Wilson | 6a2c423 | 2014-11-04 04:51:40 -0800 | [diff] [blame] | 1467 | if (obj->phys_handle) | 
|  | 1468 | ret = i915_gem_phys_pwrite(obj, args, file); | 
| Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1469 | else | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1470 | ret = i915_gem_shmem_pwrite(obj, args); | 
| Chris Wilson | 6a2c423 | 2014-11-04 04:51:40 -0800 | [diff] [blame] | 1471 | } | 
| Daniel Vetter | 5c0480f | 2011-12-14 13:57:30 +0100 | [diff] [blame] | 1472 |  | 
| Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1473 | i915_gem_object_unpin_pages(obj); | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1474 | err: | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1475 | i915_gem_object_put(obj); | 
| Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1476 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1477 | } | 
|  | 1478 |  | 
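The write-side twin is DRM_IOCTL_I915_GEM_PWRITE; a sketch mirroring the pread wrapper above (again illustrative, not a library API):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int gem_pwrite(int fd, uint32_t handle, uint64_t offset,
			      const void *src, uint64_t size)
	{
		struct drm_i915_gem_pwrite arg;

		memset(&arg, 0, sizeof(arg));
		arg.handle = handle;
		arg.offset = offset;
		arg.size = size;
		arg.data_ptr = (uintptr_t)src;

		return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &arg);
	}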
| Chris Wilson | d243ad8 | 2016-08-18 17:16:44 +0100 | [diff] [blame] | 1479 | static inline enum fb_op_origin | 
| Chris Wilson | aeecc96 | 2016-06-17 14:46:39 -0300 | [diff] [blame] | 1480 | write_origin(struct drm_i915_gem_object *obj, unsigned domain) | 
|  | 1481 | { | 
| Chris Wilson | 5034924 | 2016-08-18 17:17:04 +0100 | [diff] [blame] | 1482 | return (domain == I915_GEM_DOMAIN_GTT ? | 
|  | 1483 | obj->frontbuffer_ggtt_origin : ORIGIN_CPU); | 
| Chris Wilson | aeecc96 | 2016-06-17 14:46:39 -0300 | [diff] [blame] | 1484 | } | 
|  | 1485 |  | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1486 | static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) | 
|  | 1487 | { | 
|  | 1488 | struct drm_i915_private *i915; | 
|  | 1489 | struct list_head *list; | 
|  | 1490 | struct i915_vma *vma; | 
|  | 1491 |  | 
|  | 1492 | list_for_each_entry(vma, &obj->vma_list, obj_link) { | 
|  | 1493 | if (!i915_vma_is_ggtt(vma)) | 
| Chris Wilson | 28f412e | 2016-12-23 14:57:55 +0000 | [diff] [blame] | 1494 | break; | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1495 |  | 
|  | 1496 | if (i915_vma_is_active(vma)) | 
|  | 1497 | continue; | 
|  | 1498 |  | 
|  | 1499 | if (!drm_mm_node_allocated(&vma->node)) | 
|  | 1500 | continue; | 
|  | 1501 |  | 
|  | 1502 | list_move_tail(&vma->vm_link, &vma->vm->inactive_list); | 
|  | 1503 | } | 
|  | 1504 |  | 
|  | 1505 | i915 = to_i915(obj->base.dev); | 
|  | 1506 | list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; | 
| Joonas Lahtinen | 56cea32 | 2016-11-02 12:16:04 +0200 | [diff] [blame] | 1507 | list_move_tail(&obj->global_link, list); | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1508 | } | 
|  | 1509 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1510 | /** | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1511 | * Called when user space prepares to use an object with the CPU, either | 
|  | 1512 | * through the mmap ioctl's mapping or a GTT mapping. | 
| Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 1513 | * @dev: drm device | 
|  | 1514 | * @data: ioctl data blob | 
|  | 1515 | * @file: drm file | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1516 | */ | 
|  | 1517 | int | 
|  | 1518 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1519 | struct drm_file *file) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1520 | { | 
|  | 1521 | struct drm_i915_gem_set_domain *args = data; | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1522 | struct drm_i915_gem_object *obj; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1523 | uint32_t read_domains = args->read_domains; | 
|  | 1524 | uint32_t write_domain = args->write_domain; | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1525 | int err; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1526 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1527 | /* Only handle setting domains to types used by the CPU. */ | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1528 | if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1529 | return -EINVAL; | 
|  | 1530 |  | 
|  | 1531 | /* Having something in the write domain implies it's in the read | 
|  | 1532 | * domain, and only that read domain.  Enforce that in the request. | 
|  | 1533 | */ | 
|  | 1534 | if (write_domain != 0 && read_domains != write_domain) | 
|  | 1535 | return -EINVAL; | 
|  | 1536 |  | 
| Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1537 | obj = i915_gem_object_lookup(file, args->handle); | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1538 | if (!obj) | 
|  | 1539 | return -ENOENT; | 
| Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 1540 |  | 
| Chris Wilson | 3236f57 | 2012-08-24 09:35:09 +0100 | [diff] [blame] | 1541 | /* Try to flush the object off the GPU without holding the lock. | 
|  | 1542 | * We will repeat the flush holding the lock in the normal manner | 
|  | 1543 | * to catch cases where we are gazumped. | 
|  | 1544 | */ | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1545 | err = i915_gem_object_wait(obj, | 
| Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1546 | I915_WAIT_INTERRUPTIBLE | | 
|  | 1547 | (write_domain ? I915_WAIT_ALL : 0), | 
|  | 1548 | MAX_SCHEDULE_TIMEOUT, | 
|  | 1549 | to_rps_client(file)); | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1550 | if (err) | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1551 | goto out; | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1552 |  | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1553 | /* Flush and acquire obj->pages so that we are coherent through | 
|  | 1554 | * direct access in memory with previous cached writes through | 
|  | 1555 | * shmemfs and that our cache domain tracking remains valid. | 
|  | 1556 | * For example, if the obj->filp was moved to swap without us | 
|  | 1557 | * being notified and releasing the pages, we would mistakenly | 
|  | 1558 | * continue to assume that the obj remained out of the CPU cached | 
|  | 1559 | * domain. | 
|  | 1560 | */ | 
|  | 1561 | err = i915_gem_object_pin_pages(obj); | 
|  | 1562 | if (err) | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1563 | goto out; | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1564 |  | 
|  | 1565 | err = i915_mutex_lock_interruptible(dev); | 
|  | 1566 | if (err) | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1567 | goto out_unpin; | 
| Chris Wilson | 3236f57 | 2012-08-24 09:35:09 +0100 | [diff] [blame] | 1568 |  | 
| Chris Wilson | 43566de | 2015-01-02 16:29:29 +0530 | [diff] [blame] | 1569 | if (read_domains & I915_GEM_DOMAIN_GTT) | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1570 | err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); | 
| Chris Wilson | 43566de | 2015-01-02 16:29:29 +0530 | [diff] [blame] | 1571 | else | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1572 | err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | 
|  | 1573 |  | 
|  | 1574 | /* And bump the LRU for this access */ | 
|  | 1575 | i915_gem_object_bump_inactive_ggtt(obj); | 
|  | 1576 |  | 
|  | 1577 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1578 |  | 
| Daniel Vetter | 031b698 | 2015-06-26 19:35:16 +0200 | [diff] [blame] | 1579 | if (write_domain != 0) | 
| Chris Wilson | aeecc96 | 2016-06-17 14:46:39 -0300 | [diff] [blame] | 1580 | intel_fb_obj_invalidate(obj, write_origin(obj, write_domain)); | 
| Daniel Vetter | 031b698 | 2015-06-26 19:35:16 +0200 | [diff] [blame] | 1581 |  | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1582 | out_unpin: | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1583 | i915_gem_object_unpin_pages(obj); | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1584 | out: | 
|  | 1585 | i915_gem_object_put(obj); | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1586 | return err; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1587 | } | 
|  | 1588 |  | 
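A userspace sketch of the corresponding call, moving an object into the GTT domain before direct access (wrapper name is ours; note that write_domain, when set, must match read_domains, as enforced above):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int gem_set_domain_gtt(int fd, uint32_t handle, int for_write)
	{
		struct drm_i915_gem_set_domain arg;

		memset(&arg, 0, sizeof(arg));
		arg.handle = handle;
		arg.read_domains = I915_GEM_DOMAIN_GTT;
		arg.write_domain = for_write ? I915_GEM_DOMAIN_GTT : 0;

		return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
	}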
|  | 1589 | /** | 
|  | 1590 | * Called when user space has done writes to this buffer | 
| Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 1591 | * @dev: drm device | 
|  | 1592 | * @data: ioctl data blob | 
|  | 1593 | * @file: drm file | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1594 | */ | 
|  | 1595 | int | 
|  | 1596 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1597 | struct drm_file *file) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1598 | { | 
|  | 1599 | struct drm_i915_gem_sw_finish *args = data; | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1600 | struct drm_i915_gem_object *obj; | 
| Chris Wilson | c21724c | 2016-08-05 10:14:19 +0100 | [diff] [blame] | 1601 | int err = 0; | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1602 |  | 
| Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1603 | obj = i915_gem_object_lookup(file, args->handle); | 
| Chris Wilson | c21724c | 2016-08-05 10:14:19 +0100 | [diff] [blame] | 1604 | if (!obj) | 
|  | 1605 | return -ENOENT; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1606 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1607 | /* Pinned buffers may be scanout, so flush the cache */ | 
| Chris Wilson | c21724c | 2016-08-05 10:14:19 +0100 | [diff] [blame] | 1608 | if (READ_ONCE(obj->pin_display)) { | 
|  | 1609 | err = i915_mutex_lock_interruptible(dev); | 
|  | 1610 | if (!err) { | 
|  | 1611 | i915_gem_object_flush_cpu_write_domain(obj); | 
|  | 1612 | mutex_unlock(&dev->struct_mutex); | 
|  | 1613 | } | 
|  | 1614 | } | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 1615 |  | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1616 | i915_gem_object_put(obj); | 
| Chris Wilson | c21724c | 2016-08-05 10:14:19 +0100 | [diff] [blame] | 1617 | return err; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1618 | } | 
|  | 1619 |  | 
|  | 1620 | /** | 
| Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 1621 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address | 
|  | 1622 | *			 it is mapped to. | 
|  | 1623 | * @dev: drm device | 
|  | 1624 | * @data: ioctl data blob | 
|  | 1625 | * @file: drm file | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1626 | * | 
|  | 1627 | * While the mapping holds a reference on the contents of the object, it doesn't | 
|  | 1628 | * imply a ref on the object itself. | 
| Daniel Vetter | 3436738 | 2014-10-16 12:28:18 +0200 | [diff] [blame] | 1629 | * | 
|  | 1630 | * IMPORTANT: | 
|  | 1631 | * | 
|  | 1632 | * DRM driver writers who look at this function as an example for how to do GEM | 
|  | 1633 | * mmap support, please don't implement mmap support like here. The modern way | 
|  | 1634 | * to implement DRM mmap support is with an mmap offset ioctl (like | 
|  | 1635 | * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. | 
|  | 1636 | * That way debug tooling like valgrind will understand what's going on, hiding | 
|  | 1637 | * the mmap call in a driver private ioctl will break that. The i915 driver only | 
|  | 1638 | * does cpu mmaps this way because we didn't know better. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1639 | */ | 
|  | 1640 | int | 
|  | 1641 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1642 | struct drm_file *file) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1643 | { | 
|  | 1644 | struct drm_i915_gem_mmap *args = data; | 
| Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1645 | struct drm_i915_gem_object *obj; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1646 | unsigned long addr; | 
|  | 1647 |  | 
| Akash Goel | 1816f92 | 2015-01-02 16:29:30 +0530 | [diff] [blame] | 1648 | if (args->flags & ~(I915_MMAP_WC)) | 
|  | 1649 | return -EINVAL; | 
|  | 1650 |  | 
| Borislav Petkov | 568a58e | 2016-03-29 17:42:01 +0200 | [diff] [blame] | 1651 | if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT)) | 
| Akash Goel | 1816f92 | 2015-01-02 16:29:30 +0530 | [diff] [blame] | 1652 | return -ENODEV; | 
|  | 1653 |  | 
| Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1654 | obj = i915_gem_object_lookup(file, args->handle); | 
|  | 1655 | if (!obj) | 
| Chris Wilson | bf79cb9 | 2010-08-04 14:19:46 +0100 | [diff] [blame] | 1656 | return -ENOENT; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1657 |  | 
| Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 1658 | /* prime objects have no backing filp to GEM mmap | 
|  | 1659 | * pages from. | 
|  | 1660 | */ | 
| Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1661 | if (!obj->base.filp) { | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1662 | i915_gem_object_put(obj); | 
| Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 1663 | return -EINVAL; | 
|  | 1664 | } | 
|  | 1665 |  | 
| Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1666 | addr = vm_mmap(obj->base.filp, 0, args->size, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1667 | PROT_READ | PROT_WRITE, MAP_SHARED, | 
|  | 1668 | args->offset); | 
| Akash Goel | 1816f92 | 2015-01-02 16:29:30 +0530 | [diff] [blame] | 1669 | if (args->flags & I915_MMAP_WC) { | 
|  | 1670 | struct mm_struct *mm = current->mm; | 
|  | 1671 | struct vm_area_struct *vma; | 
|  | 1672 |  | 
| Michal Hocko | 80a89a5 | 2016-05-23 16:26:11 -0700 | [diff] [blame] | 1673 | if (down_write_killable(&mm->mmap_sem)) { | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1674 | i915_gem_object_put(obj); | 
| Michal Hocko | 80a89a5 | 2016-05-23 16:26:11 -0700 | [diff] [blame] | 1675 | return -EINTR; | 
|  | 1676 | } | 
| Akash Goel | 1816f92 | 2015-01-02 16:29:30 +0530 | [diff] [blame] | 1677 | vma = find_vma(mm, addr); | 
|  | 1678 | if (vma) | 
|  | 1679 | vma->vm_page_prot = | 
|  | 1680 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); | 
|  | 1681 | else | 
|  | 1682 | addr = -ENOMEM; | 
|  | 1683 | up_write(&mm->mmap_sem); | 
| Chris Wilson | aeecc96 | 2016-06-17 14:46:39 -0300 | [diff] [blame] | 1684 |  | 
|  | 1685 | /* This may race, but that's ok, it only gets set */ | 
| Chris Wilson | 5034924 | 2016-08-18 17:17:04 +0100 | [diff] [blame] | 1686 | WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU); | 
| Akash Goel | 1816f92 | 2015-01-02 16:29:30 +0530 | [diff] [blame] | 1687 | } | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1688 | i915_gem_object_put(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1689 | if (IS_ERR((void *)addr)) | 
|  | 1690 | return addr; | 
|  | 1691 |  | 
|  | 1692 | args->addr_ptr = (uint64_t) addr; | 
|  | 1693 |  | 
|  | 1694 | return 0; | 
|  | 1695 | } | 
|  | 1696 |  | 
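From userspace this is DRM_IOCTL_I915_GEM_MMAP. A sketch requesting a write-combined CPU mapping (illustrative; per the check above, I915_MMAP_WC fails with -ENODEV when PAT is unavailable):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static void *gem_mmap_wc(int fd, uint32_t handle, uint64_t size)
	{
		struct drm_i915_gem_mmap arg;

		memset(&arg, 0, sizeof(arg));
		arg.handle = handle;
		arg.size = size;          /* bytes to map, from offset 0 */
		arg.flags = I915_MMAP_WC; /* leave flags 0 for a WB mapping */

		if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
			return NULL;
		return (void *)(uintptr_t)arg.addr_ptr;
	}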
| Chris Wilson | 03af84f | 2016-08-18 17:17:01 +0100 | [diff] [blame] | 1697 | static unsigned int tile_row_pages(struct drm_i915_gem_object *obj) | 
|  | 1698 | { | 
| Chris Wilson | 6649a0b | 2017-01-09 16:16:08 +0000 | [diff] [blame] | 1699 | return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; | 
| Chris Wilson | 03af84f | 2016-08-18 17:17:01 +0100 | [diff] [blame] | 1700 | } | 
|  | 1701 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1702 | /** | 
| Chris Wilson | 4cc6907 | 2016-08-25 19:05:19 +0100 | [diff] [blame] | 1703 | * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps | 
|  | 1704 | * | 
|  | 1705 | * A history of the GTT mmap interface: | 
|  | 1706 | * | 
|  | 1707 | * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be | 
|  | 1708 | *     aligned and suitable for fencing, and still fit into the available | 
|  | 1709 | *     mappable space left by the pinned display objects. A classic problem | 
|  | 1710 | *     we called the page-fault-of-doom where we would ping-pong between | 
|  | 1711 | *     two objects that could not fit inside the GTT and so the memcpy | 
|  | 1712 | *     would page one object in at the expense of the other between every | 
|  | 1713 | *     single byte. | 
|  | 1714 | * | 
|  | 1715 | * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none | 
|  | 1716 | *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the | 
|  | 1717 | *     object is too large for the available space (or simply too large | 
|  | 1718 | *     for the mappable aperture!), a view is created instead and faulted | 
|  | 1719 | *     into userspace. (This view is aligned and sized appropriately for | 
|  | 1720 | *     fenced access.) | 
|  | 1721 | * | 
|  | 1722 | * Restrictions: | 
|  | 1723 | * | 
|  | 1724 | *  * snoopable objects cannot be accessed via the GTT. It can cause machine | 
|  | 1725 | *    hangs on some architectures, corruption on others. An attempt to service | 
|  | 1726 | *    a GTT page fault from a snoopable object will generate a SIGBUS. | 
|  | 1727 | * | 
|  | 1728 | *  * the object must be able to fit into RAM (physical memory, though not | 
|  | 1729 | *    limited to the mappable aperture). | 
|  | 1730 | * | 
|  | 1731 | * | 
|  | 1732 | * Caveats: | 
|  | 1733 | * | 
|  | 1734 | *  * a new GTT page fault will synchronize rendering from the GPU and flush | 
|  | 1735 | *    all data to system memory. Subsequent access will not be synchronized. | 
|  | 1736 | * | 
|  | 1737 | *  * all mappings are revoked on runtime device suspend. | 
|  | 1738 | * | 
|  | 1739 | *  * there are only 8, 16 or 32 fence registers to share between all users | 
|  | 1740 | *    (older machines require fence register for display and blitter access | 
|  | 1741 | *    as well). Contention of the fence registers will cause the previous users | 
|  | 1742 | *    to be unmapped and any new access will generate new page faults. | 
|  | 1743 | * | 
|  | 1744 | *  * running out of memory while servicing a fault may generate a SIGBUS, | 
|  | 1745 | *    rather than the expected SIGSEGV. | 
|  | 1746 | */ | 
|  | 1747 | int i915_gem_mmap_gtt_version(void) | 
|  | 1748 | { | 
|  | 1749 | return 1; | 
|  | 1750 | } | 
|  | 1751 |  | 
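Userspace can discover this number with I915_PARAM_MMAP_GTT_VERSION; a hedged sketch (kernels predating the parameter report an error, which we fold into version 0):

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int mmap_gtt_version(int fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_MMAP_GTT_VERSION,
			.value = &value,
		};

		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0; /* parameter unknown: assume version 0 */
		return value;
	}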
| Chris Wilson | 2d4281b | 2017-01-10 09:56:32 +0000 | [diff] [blame] | 1752 | static inline struct i915_ggtt_view | 
|  | 1753 | compute_partial_view(struct drm_i915_gem_object *obj, | 
| Chris Wilson | 2d4281b | 2017-01-10 09:56:32 +0000 | [diff] [blame] | 1754 | pgoff_t page_offset, | 
|  | 1755 | unsigned int chunk) | 
|  | 1756 | { | 
|  | 1757 | struct i915_ggtt_view view; | 
|  | 1758 |  | 
|  | 1759 | if (i915_gem_object_is_tiled(obj)) | 
|  | 1760 | chunk = roundup(chunk, tile_row_pages(obj)); | 
|  | 1761 |  | 
| Chris Wilson | 2d4281b | 2017-01-10 09:56:32 +0000 | [diff] [blame] | 1762 | view.type = I915_GGTT_VIEW_PARTIAL; | 
| Chris Wilson | 8bab1193 | 2017-01-14 00:28:25 +0000 | [diff] [blame] | 1763 | view.partial.offset = rounddown(page_offset, chunk); | 
|  | 1764 | view.partial.size = | 
| Chris Wilson | 2d4281b | 2017-01-10 09:56:32 +0000 | [diff] [blame] | 1765 | min_t(unsigned int, chunk, | 
| Chris Wilson | 8bab1193 | 2017-01-14 00:28:25 +0000 | [diff] [blame] | 1766 | (obj->base.size >> PAGE_SHIFT) - view.partial.offset); | 
| Chris Wilson | 2d4281b | 2017-01-10 09:56:32 +0000 | [diff] [blame] | 1767 |  | 
|  | 1768 | /* If the partial covers the entire object, just create a normal VMA. */ | 
|  | 1769 | if (chunk >= obj->base.size >> PAGE_SHIFT) | 
|  | 1770 | view.type = I915_GGTT_VIEW_NORMAL; | 
|  | 1771 |  | 
|  | 1772 | return view; | 
|  | 1773 | } | 
|  | 1774 |  | 
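A worked instance of the arithmetic above: a fault at page 1000 of a 4096-page untiled object, with the 1 MiB minimum chunk (256 pages at 4 KiB), yields a partial view covering pages [768, 1024). Standalone check (illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int nr_pages = 4096, chunk = 256, fault_page = 1000;
		unsigned int start = (fault_page / chunk) * chunk; /* 768 */
		unsigned int size = chunk < nr_pages - start
				  ? chunk : nr_pages - start;      /* 256 */

		printf("partial view: pages [%u, %u)\n", start, start + size);
		return 0;
	}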
| Chris Wilson | 4cc6907 | 2016-08-25 19:05:19 +0100 | [diff] [blame] | 1775 | /** | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1776 | * i915_gem_fault - fault a page into the GTT | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1777 | * @area: CPU VMA in question | 
| Geliang Tang | d9072a3 | 2015-09-15 05:58:44 -0700 | [diff] [blame] | 1778 | * @vmf: fault info | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1779 | * | 
|  | 1780 | * The fault handler is set up by drm_gem_mmap() when an object is GTT-mapped | 
|  | 1781 | * from userspace.  The fault handler takes care of binding the object to | 
|  | 1782 | * the GTT (if needed), allocating and programming a fence register (again, | 
|  | 1783 | * only if needed based on whether the old reg is still valid or the object | 
|  | 1784 | * is tiled) and inserting a new PTE into the faulting process. | 
|  | 1785 | * | 
|  | 1786 | * Note that handling the fault may involve evicting existing objects | 
|  | 1787 | * from the GTT and/or fence registers to make room.  So performance may | 
|  | 1788 | * suffer if the GTT working set is large or there are few fence registers | 
|  | 1789 | * left. | 
| Chris Wilson | 4cc6907 | 2016-08-25 19:05:19 +0100 | [diff] [blame] | 1790 | * | 
|  | 1791 | * The current feature set supported by i915_gem_fault() and thus GTT mmaps | 
|  | 1792 | * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1793 | */ | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1794 | int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1795 | { | 
| Chris Wilson | 03af84f | 2016-08-18 17:17:01 +0100 | [diff] [blame] | 1796 | #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */ | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1797 | struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1798 | struct drm_device *dev = obj->base.dev; | 
| Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 1799 | struct drm_i915_private *dev_priv = to_i915(dev); | 
|  | 1800 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1801 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1802 | struct i915_vma *vma; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1803 | pgoff_t page_offset; | 
| Chris Wilson | 8211887 | 2016-08-18 17:17:05 +0100 | [diff] [blame] | 1804 | unsigned int flags; | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1805 | int ret; | 
| Paulo Zanoni | f65c916 | 2013-11-27 18:20:34 -0200 | [diff] [blame] | 1806 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1807 | /* We don't use vmf->pgoff since that has the fake offset */ | 
| Jan Kara | 1a29d85 | 2016-12-14 15:07:01 -0800 | [diff] [blame] | 1808 | page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1809 |  | 
| Chris Wilson | db53a30 | 2011-02-03 11:57:46 +0000 | [diff] [blame] | 1810 | trace_i915_gem_object_fault(obj, page_offset, true, write); | 
|  | 1811 |  | 
| Chris Wilson | 6e4930f | 2014-02-07 18:37:06 -0200 | [diff] [blame] | 1812 | /* Try to flush the object off the GPU first without holding the lock. | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1813 | * Upon acquiring the lock, we will perform our sanity checks and then | 
| Chris Wilson | 6e4930f | 2014-02-07 18:37:06 -0200 | [diff] [blame] | 1814 | * repeat the flush holding the lock in the normal manner to catch cases | 
|  | 1815 | * where we are gazumped. | 
|  | 1816 | */ | 
| Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1817 | ret = i915_gem_object_wait(obj, | 
|  | 1818 | I915_WAIT_INTERRUPTIBLE, | 
|  | 1819 | MAX_SCHEDULE_TIMEOUT, | 
|  | 1820 | NULL); | 
| Chris Wilson | 6e4930f | 2014-02-07 18:37:06 -0200 | [diff] [blame] | 1821 | if (ret) | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1822 | goto err; | 
|  | 1823 |  | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1824 | ret = i915_gem_object_pin_pages(obj); | 
|  | 1825 | if (ret) | 
|  | 1826 | goto err; | 
|  | 1827 |  | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1828 | intel_runtime_pm_get(dev_priv); | 
|  | 1829 |  | 
|  | 1830 | ret = i915_mutex_lock_interruptible(dev); | 
|  | 1831 | if (ret) | 
|  | 1832 | goto err_rpm; | 
| Chris Wilson | 6e4930f | 2014-02-07 18:37:06 -0200 | [diff] [blame] | 1833 |  | 
| Chris Wilson | eb119bd | 2012-12-16 12:43:36 +0000 | [diff] [blame] | 1834 | /* Access to snoopable pages through the GTT is incoherent. */ | 
| Tvrtko Ursulin | 0031fb9 | 2016-11-04 14:42:44 +0000 | [diff] [blame] | 1835 | if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) { | 
| Chris Wilson | ddeff6e | 2014-05-28 16:16:41 +0100 | [diff] [blame] | 1836 | ret = -EFAULT; | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1837 | goto err_unlock; | 
| Chris Wilson | eb119bd | 2012-12-16 12:43:36 +0000 | [diff] [blame] | 1838 | } | 
|  | 1839 |  | 
| Chris Wilson | 8211887 | 2016-08-18 17:17:05 +0100 | [diff] [blame] | 1840 | /* If the object is smaller than a couple of partial vmas, it is | 
|  | 1841 | * not worth creating only a single partial vma - we may as well | 
|  | 1842 | * clear enough space for the full object. | 
|  | 1843 | */ | 
|  | 1844 | flags = PIN_MAPPABLE; | 
|  | 1845 | if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) | 
|  | 1846 | flags |= PIN_NONBLOCK | PIN_NONFAULT; | 
|  | 1847 |  | 
| Chris Wilson | a61007a | 2016-08-18 17:17:02 +0100 | [diff] [blame] | 1848 | /* Now pin it into the GTT as needed */ | 
| Chris Wilson | 8211887 | 2016-08-18 17:17:05 +0100 | [diff] [blame] | 1849 | vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); | 
| Chris Wilson | a61007a | 2016-08-18 17:17:02 +0100 | [diff] [blame] | 1850 | if (IS_ERR(vma)) { | 
| Chris Wilson | a61007a | 2016-08-18 17:17:02 +0100 | [diff] [blame] | 1851 | /* Use a partial view if it is bigger than available space */ | 
| Chris Wilson | 2d4281b | 2017-01-10 09:56:32 +0000 | [diff] [blame] | 1852 | struct i915_ggtt_view view = | 
| Chris Wilson | 8201c1f | 2017-01-10 09:56:33 +0000 | [diff] [blame] | 1853 | compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); | 
| Chris Wilson | aa136d9 | 2016-08-18 17:17:03 +0100 | [diff] [blame] | 1854 |  | 
| Chris Wilson | 5034924 | 2016-08-18 17:17:04 +0100 | [diff] [blame] | 1855 | /* Userspace is now writing through an untracked VMA, abandon | 
|  | 1856 | * all hope that the hardware is able to track future writes. | 
|  | 1857 | */ | 
|  | 1858 | obj->frontbuffer_ggtt_origin = ORIGIN_CPU; | 
|  | 1859 |  | 
| Chris Wilson | a61007a | 2016-08-18 17:17:02 +0100 | [diff] [blame] | 1860 | vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); | 
|  | 1861 | } | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1862 | if (IS_ERR(vma)) { | 
|  | 1863 | ret = PTR_ERR(vma); | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1864 | goto err_unlock; | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1865 | } | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1866 |  | 
| Chris Wilson | c983930 | 2012-11-20 10:45:17 +0000 | [diff] [blame] | 1867 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | 
|  | 1868 | if (ret) | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1869 | goto err_unpin; | 
| Chris Wilson | c983930 | 2012-11-20 10:45:17 +0000 | [diff] [blame] | 1870 |  | 
| Chris Wilson | 49ef529 | 2016-08-18 17:17:00 +0100 | [diff] [blame] | 1871 | ret = i915_vma_get_fence(vma); | 
| Chris Wilson | c983930 | 2012-11-20 10:45:17 +0000 | [diff] [blame] | 1872 | if (ret) | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1873 | goto err_unpin; | 
| Chris Wilson | 7d1c480 | 2010-08-07 21:45:03 +0100 | [diff] [blame] | 1874 |  | 
| Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 1875 | /* Mark as being mmapped into userspace for later revocation */ | 
| Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1876 | assert_rpm_wakelock_held(dev_priv); | 
| Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 1877 | if (list_empty(&obj->userfault_link)) | 
|  | 1878 | list_add(&obj->userfault_link, &dev_priv->mm.userfault_list); | 
| Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 1879 |  | 
| Chris Wilson | b90b91d | 2014-06-10 12:14:40 +0100 | [diff] [blame] | 1880 | /* Finally, remap it using the new GTT offset */ | 
| Chris Wilson | c58305a | 2016-08-19 16:54:28 +0100 | [diff] [blame] | 1881 | ret = remap_io_mapping(area, | 
| Chris Wilson | 8bab1193 | 2017-01-14 00:28:25 +0000 | [diff] [blame] | 1882 | area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT), | 
| Chris Wilson | c58305a | 2016-08-19 16:54:28 +0100 | [diff] [blame] | 1883 | (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT, | 
|  | 1884 | min_t(u64, vma->size, area->vm_end - area->vm_start), | 
|  | 1885 | &ggtt->mappable); | 
| Chris Wilson | a61007a | 2016-08-18 17:17:02 +0100 | [diff] [blame] | 1886 |  | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1887 | err_unpin: | 
| Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1888 | __i915_vma_unpin(vma); | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1889 | err_unlock: | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1890 | mutex_unlock(&dev->struct_mutex); | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1891 | err_rpm: | 
|  | 1892 | intel_runtime_pm_put(dev_priv); | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 1893 | i915_gem_object_unpin_pages(obj); | 
| Chris Wilson | b8f9096 | 2016-08-05 10:14:07 +0100 | [diff] [blame] | 1894 | err: | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1895 | switch (ret) { | 
| Chris Wilson | d9bc7e9 | 2011-02-07 13:09:31 +0000 | [diff] [blame] | 1896 | case -EIO: | 
| Daniel Vetter | 2232f03 | 2014-09-04 09:36:18 +0200 | [diff] [blame] | 1897 | /* | 
|  | 1898 | * We eat errors when the gpu is terminally wedged to avoid | 
|  | 1899 | * userspace unduly crashing (gl has no provisions for mmaps to | 
|  | 1900 | * fail). But any other -EIO isn't ours (e.g. swap in failure) | 
|  | 1901 | * and so needs to be reported. | 
|  | 1902 | */ | 
|  | 1903 | if (!i915_terminally_wedged(&dev_priv->gpu_error)) { | 
| Paulo Zanoni | f65c916 | 2013-11-27 18:20:34 -0200 | [diff] [blame] | 1904 | ret = VM_FAULT_SIGBUS; | 
|  | 1905 | break; | 
|  | 1906 | } | 
| Chris Wilson | 045e769 | 2010-11-07 09:18:22 +0000 | [diff] [blame] | 1907 | case -EAGAIN: | 
| Daniel Vetter | 571c608 | 2013-09-12 17:57:28 +0200 | [diff] [blame] | 1908 | /* | 
|  | 1909 | * EAGAIN means the gpu is hung and we'll wait for the error | 
|  | 1910 | * handler to reset everything when re-faulting in | 
|  | 1911 | * i915_mutex_lock_interruptible. | 
| Chris Wilson | d9bc7e9 | 2011-02-07 13:09:31 +0000 | [diff] [blame] | 1912 | */ | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1913 | case 0: | 
|  | 1914 | case -ERESTARTSYS: | 
| Chris Wilson | bed636a | 2011-02-11 20:31:19 +0000 | [diff] [blame] | 1915 | case -EINTR: | 
| Dmitry Rogozhkin | e79e0fe | 2012-10-03 17:15:26 +0300 | [diff] [blame] | 1916 | case -EBUSY: | 
|  | 1917 | /* | 
|  | 1918 | * EBUSY is ok: this just means that another thread | 
|  | 1919 | * already did the job. | 
|  | 1920 | */ | 
| Paulo Zanoni | f65c916 | 2013-11-27 18:20:34 -0200 | [diff] [blame] | 1921 | ret = VM_FAULT_NOPAGE; | 
|  | 1922 | break; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1923 | case -ENOMEM: | 
| Paulo Zanoni | f65c916 | 2013-11-27 18:20:34 -0200 | [diff] [blame] | 1924 | ret = VM_FAULT_OOM; | 
|  | 1925 | break; | 
| Daniel Vetter | a7c2e1a | 2012-10-17 11:17:16 +0200 | [diff] [blame] | 1926 | case -ENOSPC: | 
| Chris Wilson | 45d6781 | 2014-01-31 11:34:57 +0000 | [diff] [blame] | 1927 | case -EFAULT: | 
| Paulo Zanoni | f65c916 | 2013-11-27 18:20:34 -0200 | [diff] [blame] | 1928 | ret = VM_FAULT_SIGBUS; | 
|  | 1929 | break; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1930 | default: | 
| Daniel Vetter | a7c2e1a | 2012-10-17 11:17:16 +0200 | [diff] [blame] | 1931 | WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); | 
| Paulo Zanoni | f65c916 | 2013-11-27 18:20:34 -0200 | [diff] [blame] | 1932 | ret = VM_FAULT_SIGBUS; | 
|  | 1933 | break; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1934 | } | 
| Paulo Zanoni | f65c916 | 2013-11-27 18:20:34 -0200 | [diff] [blame] | 1935 | return ret; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1936 | } | 
|  | 1937 |  | 
|  | 1938 | /** | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1939 | * i915_gem_release_mmap - remove physical page mappings | 
|  | 1940 | * @obj: obj in question | 
|  | 1941 | * | 
| André Goddard Rosa | af901ca | 2009-11-14 13:09:05 -0200 | [diff] [blame] | 1942 | * Preserve the reservation of the mmap offset with the DRM core code, but | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1943 | * relinquish ownership of the pages back to the system. | 
|  | 1944 | * | 
|  | 1945 | * It is vital that we remove the page mapping if we have mapped a tiled | 
|  | 1946 | * object through the GTT and then lose the fence register due to | 
|  | 1947 | * resource pressure. Similarly if the object has been moved out of the | 
|  | 1948 | * aperture, then pages mapped into userspace must be revoked. Removing the | 
|  | 1949 | * mapping will then trigger a page fault on the next user access, allowing | 
|  | 1950 | * fixup by i915_gem_fault(). | 
|  | 1951 | */ | 
| Eric Anholt | d05ca30 | 2009-07-10 13:02:26 -0700 | [diff] [blame] | 1952 | void | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1953 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1954 | { | 
| Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 1955 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | 
| Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 1956 |  | 
| Chris Wilson | 349f2cc | 2016-04-13 17:35:12 +0100 | [diff] [blame] | 1957 | /* Serialisation between user GTT access and our code depends upon | 
|  | 1958 | * revoking the CPU's PTE whilst the mutex is held. The next user | 
|  | 1959 | * pagefault then has to wait until we release the mutex. | 
| Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1960 | * | 
|  | 1961 | * Note that RPM complicates this somewhat by adding an additional | 
|  | 1962 | * requirement that operations to the GGTT be made while holding the RPM | 
|  | 1963 | * wakeref. | 
| Chris Wilson | 349f2cc | 2016-04-13 17:35:12 +0100 | [diff] [blame] | 1964 | */ | 
| Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 1965 | lockdep_assert_held(&i915->drm.struct_mutex); | 
| Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1966 | intel_runtime_pm_get(i915); | 
| Chris Wilson | 349f2cc | 2016-04-13 17:35:12 +0100 | [diff] [blame] | 1967 |  | 
| Chris Wilson | 3594a3e | 2016-10-24 13:42:16 +0100 | [diff] [blame] | 1968 | if (list_empty(&obj->userfault_link)) | 
| Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1969 | goto out; | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1970 |  | 
| Chris Wilson | 3594a3e | 2016-10-24 13:42:16 +0100 | [diff] [blame] | 1971 | list_del_init(&obj->userfault_link); | 
| David Herrmann | 6796cb1 | 2014-01-03 14:24:19 +0100 | [diff] [blame] | 1972 | drm_vma_node_unmap(&obj->base.vma_node, | 
|  | 1973 | obj->base.dev->anon_inode->i_mapping); | 
| Chris Wilson | 349f2cc | 2016-04-13 17:35:12 +0100 | [diff] [blame] | 1974 |  | 
|  | 1975 | /* Ensure that the CPU's PTEs are revoked and there are no outstanding | 
|  | 1976 | * memory transactions from userspace before we return. The TLB | 
|  | 1977 | * flushing implied by changing the PTEs above *should* be | 
|  | 1978 | * sufficient; an extra barrier here just provides us with a bit | 
|  | 1979 | * of paranoid documentation about our requirement to serialise | 
|  | 1980 | * memory writes before touching registers / GSM. | 
|  | 1981 | */ | 
|  | 1982 | wmb(); | 
| Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1983 |  | 
|  | 1984 | out: | 
|  | 1985 | intel_runtime_pm_put(i915); | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1986 | } | 
|  | 1987 |  | 
| Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 1988 | void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) | 
| Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 1989 | { | 
| Chris Wilson | 3594a3e | 2016-10-24 13:42:16 +0100 | [diff] [blame] | 1990 | struct drm_i915_gem_object *obj, *on; | 
| Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 1991 | int i; | 
| Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 1992 |  | 
| Chris Wilson | 3594a3e | 2016-10-24 13:42:16 +0100 | [diff] [blame] | 1993 | /* | 
|  | 1994 | * Only called during RPM suspend. All users of the userfault_list | 
|  | 1995 | * must be holding an RPM wakeref to ensure that this cannot run | 
|  | 1996 | * concurrently with them (they rely on the struct_mutex for | 
|  | 1997 | * protection amongst themselves). | 
|  | 1998 | */ | 
|  | 1999 |  | 
|  | 2000 | list_for_each_entry_safe(obj, on, | 
|  | 2001 | &dev_priv->mm.userfault_list, userfault_link) { | 
| Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 2002 | list_del_init(&obj->userfault_link); | 
| Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 2003 | drm_vma_node_unmap(&obj->base.vma_node, | 
|  | 2004 | obj->base.dev->anon_inode->i_mapping); | 
| Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 2005 | } | 
| Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 2006 |  | 
|  | 2007 | /* The fences will be lost when the device powers down. If any were | 
|  | 2008 | * in use by hardware (i.e. they are pinned), we should not be powering | 
|  | 2009 | * down! All other fences will be reacquired by the user upon waking. | 
|  | 2010 | */ | 
|  | 2011 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 
|  | 2012 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | 
|  | 2013 |  | 
|  | 2014 | if (WARN_ON(reg->pin_count)) | 
|  | 2015 | continue; | 
|  | 2016 |  | 
|  | 2017 | if (!reg->vma) | 
|  | 2018 | continue; | 
|  | 2019 |  | 
|  | 2020 | GEM_BUG_ON(!list_empty(®->vma->obj->userfault_link)); | 
|  | 2021 | reg->dirty = true; | 
|  | 2022 | } | 
| Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 2023 | } | 
|  | 2024 |  | 
| Chris Wilson | d8cb508 | 2012-08-11 15:41:03 +0100 | [diff] [blame] | 2025 | static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) | 
|  | 2026 | { | 
| Chris Wilson | fac5e23 | 2016-07-04 11:34:36 +0100 | [diff] [blame] | 2027 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); | 
| Chris Wilson | f3f6184 | 2016-08-05 10:14:14 +0100 | [diff] [blame] | 2028 | int err; | 
| Chris Wilson | d8cb508 | 2012-08-11 15:41:03 +0100 | [diff] [blame] | 2029 |  | 
| Chris Wilson | f3f6184 | 2016-08-05 10:14:14 +0100 | [diff] [blame] | 2030 | err = drm_gem_create_mmap_offset(&obj->base); | 
| Chris Wilson | b42a13d | 2017-01-06 15:22:40 +0000 | [diff] [blame] | 2031 | if (likely(!err)) | 
| Chris Wilson | f3f6184 | 2016-08-05 10:14:14 +0100 | [diff] [blame] | 2032 | return 0; | 
| Daniel Vetter | da494d7 | 2012-12-20 15:11:16 +0100 | [diff] [blame] | 2033 |  | 
| Chris Wilson | b42a13d | 2017-01-06 15:22:40 +0000 | [diff] [blame] | 2034 | /* Attempt to reap some mmap space from dead objects */ | 
|  | 2035 | do { | 
|  | 2036 | err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); | 
|  | 2037 | if (err) | 
|  | 2038 | break; | 
| Chris Wilson | d8cb508 | 2012-08-11 15:41:03 +0100 | [diff] [blame] | 2039 |  | 
| Chris Wilson | b42a13d | 2017-01-06 15:22:40 +0000 | [diff] [blame] | 2040 | i915_gem_drain_freed_objects(dev_priv); | 
| Chris Wilson | f3f6184 | 2016-08-05 10:14:14 +0100 | [diff] [blame] | 2041 | err = drm_gem_create_mmap_offset(&obj->base); | 
| Chris Wilson | b42a13d | 2017-01-06 15:22:40 +0000 | [diff] [blame] | 2042 | if (!err) | 
|  | 2043 | break; | 
|  | 2044 |  | 
|  | 2045 | } while (flush_delayed_work(&dev_priv->gt.retire_work)); | 
| Daniel Vetter | da494d7 | 2012-12-20 15:11:16 +0100 | [diff] [blame] | 2046 |  | 
| Chris Wilson | f3f6184 | 2016-08-05 10:14:14 +0100 | [diff] [blame] | 2047 | return err; | 
| Chris Wilson | d8cb508 | 2012-08-11 15:41:03 +0100 | [diff] [blame] | 2048 | } | 
|  | 2049 |  | 
|  | 2050 | static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) | 
|  | 2051 | { | 
| Chris Wilson | d8cb508 | 2012-08-11 15:41:03 +0100 | [diff] [blame] | 2052 | drm_gem_free_mmap_offset(&obj->base); | 
|  | 2053 | } | 
|  | 2054 |  | 
| Dave Airlie | da6b51d | 2014-12-24 13:11:17 +1000 | [diff] [blame] | 2055 | int | 
| Dave Airlie | ff72145b | 2011-02-07 12:16:14 +1000 | [diff] [blame] | 2056 | i915_gem_mmap_gtt(struct drm_file *file, | 
|  | 2057 | struct drm_device *dev, | 
| Dave Airlie | da6b51d | 2014-12-24 13:11:17 +1000 | [diff] [blame] | 2058 | uint32_t handle, | 
| Dave Airlie | ff72145b | 2011-02-07 12:16:14 +1000 | [diff] [blame] | 2059 | uint64_t *offset) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2060 | { | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 2061 | struct drm_i915_gem_object *obj; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2062 | int ret; | 
|  | 2063 |  | 
| Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 2064 | obj = i915_gem_object_lookup(file, handle); | 
| Chris Wilson | f3f6184 | 2016-08-05 10:14:14 +0100 | [diff] [blame] | 2065 | if (!obj) | 
|  | 2066 | return -ENOENT; | 
| Chris Wilson | ab18282 | 2009-09-22 18:46:17 +0100 | [diff] [blame] | 2067 |  | 
| Chris Wilson | d8cb508 | 2012-08-11 15:41:03 +0100 | [diff] [blame] | 2068 | ret = i915_gem_object_create_mmap_offset(obj); | 
| Chris Wilson | f3f6184 | 2016-08-05 10:14:14 +0100 | [diff] [blame] | 2069 | if (ret == 0) | 
|  | 2070 | *offset = drm_vma_node_offset_addr(&obj->base.vma_node); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2071 |  | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 2072 | i915_gem_object_put(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 2073 | return ret; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2074 | } | 
|  | 2075 |  | 
| Dave Airlie | ff72145b | 2011-02-07 12:16:14 +1000 | [diff] [blame] | 2076 | /** | 
|  | 2077 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | 
|  | 2078 | * @dev: DRM device | 
|  | 2079 | * @data: GTT mapping ioctl data | 
|  | 2080 | * @file: GEM object info | 
|  | 2081 | * | 
|  | 2082 | * Simply returns the fake offset to userspace so it can mmap it. | 
|  | 2083 | * The mmap call will end up in drm_gem_mmap(), which will set things | 
|  | 2084 | * up so we can get faults in the handler above. | 
|  | 2085 | * | 
|  | 2086 | * The fault handler will take care of binding the object into the GTT | 
|  | 2087 | * (since it may have been evicted to make room for something), allocating | 
|  | 2088 | * a fence register, and mapping the appropriate aperture address into | 
|  | 2089 | * userspace. | 
|  | 2090 | */ | 
|  | 2091 | int | 
|  | 2092 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | 
|  | 2093 | struct drm_file *file) | 
|  | 2094 | { | 
|  | 2095 | struct drm_i915_gem_mmap_gtt *args = data; | 
|  | 2096 |  | 
| Dave Airlie | da6b51d | 2014-12-24 13:11:17 +1000 | [diff] [blame] | 2097 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); | 
| Dave Airlie | ff72145b | 2011-02-07 12:16:14 +1000 | [diff] [blame] | 2098 | } | 
|  | 2099 |  | 
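|  |  | /* Illustrative userspace sketch (an assumption of this note, not part | 
|  |  |  * of the driver): pairing the ioctl above with mmap() of the fake | 
|  |  |  * offset, given an open DRM fd, a GEM handle and the object size. | 
|  |  |  * | 
|  |  |  *	void *gtt_mmap(int fd, uint32_t handle, size_t size) | 
|  |  |  *	{ | 
|  |  |  *		struct drm_i915_gem_mmap_gtt arg = { .handle = handle }; | 
|  |  |  *		void *ptr; | 
|  |  |  * | 
|  |  |  *		if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg)) | 
|  |  |  *			return NULL; | 
|  |  |  *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, | 
|  |  |  *			   MAP_SHARED, fd, arg.offset); | 
|  |  |  *		return ptr == MAP_FAILED ? NULL : ptr; | 
|  |  |  *	} | 
|  |  |  * | 
|  |  |  * The first access through the returned pointer then faults into | 
|  |  |  * i915_gem_fault() above. | 
|  |  |  */ | 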
| Daniel Vetter | 225067e | 2012-08-20 10:23:20 +0200 | [diff] [blame] | 2100 | /* Immediately discard the backing storage */ | 
|  | 2101 | static void | 
|  | 2102 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 2103 | { | 
| Chris Wilson | 4d6294bf | 2012-08-11 15:41:05 +0100 | [diff] [blame] | 2104 | i915_gem_object_free_mmap_offset(obj); | 
| Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 2105 |  | 
| Chris Wilson | 4d6294bf | 2012-08-11 15:41:05 +0100 | [diff] [blame] | 2106 | if (obj->base.filp == NULL) | 
|  | 2107 | return; | 
|  | 2108 |  | 
| Daniel Vetter | 225067e | 2012-08-20 10:23:20 +0200 | [diff] [blame] | 2109 | /* Our goal here is to return as much of the memory as | 
|  | 2110 | * possible back to the system, as we are called from the OOM path. | 
|  | 2111 | * To do this we must instruct the shmfs to drop all of its | 
|  | 2112 | * backing pages, *now*. | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 2113 | */ | 
| Chris Wilson | 5537252 | 2014-03-25 13:23:06 +0000 | [diff] [blame] | 2114 | shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2115 | obj->mm.madv = __I915_MADV_PURGED; | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 2116 | } | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 2117 |  | 
| Chris Wilson | 5537252 | 2014-03-25 13:23:06 +0000 | [diff] [blame] | 2118 | /* Try to discard unwanted pages */ | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2119 | void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) | 
| Daniel Vetter | 225067e | 2012-08-20 10:23:20 +0200 | [diff] [blame] | 2120 | { | 
| Chris Wilson | 5537252 | 2014-03-25 13:23:06 +0000 | [diff] [blame] | 2121 | struct address_space *mapping; | 
|  | 2122 |  | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2123 | lockdep_assert_held(&obj->mm.lock); | 
|  | 2124 | GEM_BUG_ON(obj->mm.pages); | 
|  | 2125 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2126 | switch (obj->mm.madv) { | 
| Chris Wilson | 5537252 | 2014-03-25 13:23:06 +0000 | [diff] [blame] | 2127 | case I915_MADV_DONTNEED: | 
|  | 2128 | i915_gem_object_truncate(obj); | 
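|  |  | /* fall through */ | 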
|  | 2129 | case __I915_MADV_PURGED: | 
|  | 2130 | return; | 
|  | 2131 | } | 
|  | 2132 |  | 
|  | 2133 | if (obj->base.filp == NULL) | 
|  | 2134 | return; | 
|  | 2135 |  | 
| Al Viro | 93c76a3 | 2015-12-04 23:45:44 -0500 | [diff] [blame] | 2136 | mapping = obj->base.filp->f_mapping; | 
| Chris Wilson | 5537252 | 2014-03-25 13:23:06 +0000 | [diff] [blame] | 2137 | invalidate_mapping_pages(mapping, 0, (loff_t)-1); | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 2138 | } | 
|  | 2139 |  | 
| Chris Wilson | 5cdf588 | 2010-09-27 15:51:07 +0100 | [diff] [blame] | 2140 | static void | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2141 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, | 
|  | 2142 | struct sg_table *pages) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2143 | { | 
| Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2144 | struct sgt_iter sgt_iter; | 
|  | 2145 | struct page *page; | 
| Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 2146 |  | 
| Chris Wilson | e5facdf | 2016-12-23 14:57:57 +0000 | [diff] [blame] | 2147 | __i915_gem_object_release_shmem(obj, pages, true); | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2148 |  | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2149 | i915_gem_gtt_finish_pages(obj, pages); | 
| Imre Deak | e227330 | 2015-07-09 12:59:05 +0300 | [diff] [blame] | 2150 |  | 
| Daniel Vetter | 6dacfd2 | 2011-09-12 21:30:02 +0200 | [diff] [blame] | 2151 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2152 | i915_gem_object_save_bit_17_swizzle(obj, pages); | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 2153 |  | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2154 | for_each_sgt_page(page, sgt_iter, pages) { | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2155 | if (obj->mm.dirty) | 
| Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 2156 | set_page_dirty(page); | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 2157 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2158 | if (obj->mm.madv == I915_MADV_WILLNEED) | 
| Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 2159 | mark_page_accessed(page); | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 2160 |  | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2161 | put_page(page); | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 2162 | } | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2163 | obj->mm.dirty = false; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2164 |  | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2165 | sg_free_table(pages); | 
|  | 2166 | kfree(pages); | 
| Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 2167 | } | 
|  | 2168 |  | 
| Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 2169 | static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) | 
|  | 2170 | { | 
|  | 2171 | struct radix_tree_iter iter; | 
|  | 2172 | void **slot; | 
|  | 2173 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2174 | radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) | 
|  | 2175 | radix_tree_delete(&obj->mm.get_page.radix, iter.index); | 
| Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 2176 | } | 
|  | 2177 |  | 
| Chris Wilson | 548625e | 2016-11-01 12:11:34 +0000 | [diff] [blame] | 2178 | void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, | 
|  | 2179 | enum i915_mm_subclass subclass) | 
| Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 2180 | { | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2181 | struct sg_table *pages; | 
| Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 2182 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2183 | if (i915_gem_object_has_pinned_pages(obj)) | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2184 | return; | 
| Chris Wilson | a557017 | 2012-09-04 21:02:54 +0100 | [diff] [blame] | 2185 |  | 
| Chris Wilson | 15717de | 2016-08-04 07:52:26 +0100 | [diff] [blame] | 2186 | GEM_BUG_ON(obj->bind_count); | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2187 | if (!READ_ONCE(obj->mm.pages)) | 
|  | 2188 | return; | 
|  | 2189 |  | 
|  | 2190 | /* May be called by shrinker from within get_pages() (on another bo) */ | 
| Chris Wilson | 548625e | 2016-11-01 12:11:34 +0000 | [diff] [blame] | 2191 | mutex_lock_nested(&obj->mm.lock, subclass); | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2192 | if (unlikely(atomic_read(&obj->mm.pages_pin_count))) | 
|  | 2193 | goto unlock; | 
| Ben Widawsky | 3e12302 | 2013-07-31 17:00:04 -0700 | [diff] [blame] | 2194 |  | 
| Chris Wilson | a2165e3 | 2012-12-03 11:49:00 +0000 | [diff] [blame] | 2195 | /* ->put_pages might need to allocate memory for the bit17 swizzle | 
|  | 2196 | * array, hence protect the pages from being reaped by removing them | 
|  | 2197 | * from the gtt lists early. */ | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2198 | pages = fetch_and_zero(&obj->mm.pages); | 
|  | 2199 | GEM_BUG_ON(!pages); | 
| Chris Wilson | a2165e3 | 2012-12-03 11:49:00 +0000 | [diff] [blame] | 2200 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2201 | if (obj->mm.mapping) { | 
| Chris Wilson | 4b30cb2 | 2016-08-18 17:16:42 +0100 | [diff] [blame] | 2202 | void *ptr; | 
|  | 2203 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2204 | ptr = ptr_mask_bits(obj->mm.mapping); | 
| Chris Wilson | 4b30cb2 | 2016-08-18 17:16:42 +0100 | [diff] [blame] | 2205 | if (is_vmalloc_addr(ptr)) | 
|  | 2206 | vunmap(ptr); | 
| Chris Wilson | fb8621d | 2016-04-08 12:11:14 +0100 | [diff] [blame] | 2207 | else | 
| Chris Wilson | 4b30cb2 | 2016-08-18 17:16:42 +0100 | [diff] [blame] | 2208 | kunmap(kmap_to_page(ptr)); | 
|  | 2209 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2210 | obj->mm.mapping = NULL; | 
| Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 2211 | } | 
|  | 2212 |  | 
| Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 2213 | __i915_gem_object_reset_page_iter(obj); | 
|  | 2214 |  | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2215 | obj->ops->put_pages(obj, pages); | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2216 | unlock: | 
|  | 2217 | mutex_unlock(&obj->mm.lock); | 
| Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 2218 | } | 
|  | 2219 |  | 
| Tvrtko Ursulin | 0c40ce1 | 2016-11-09 15:13:43 +0000 | [diff] [blame] | 2220 | static void i915_sg_trim(struct sg_table *orig_st) | 
|  | 2221 | { | 
|  | 2222 | struct sg_table new_st; | 
|  | 2223 | struct scatterlist *sg, *new_sg; | 
|  | 2224 | unsigned int i; | 
|  | 2225 |  | 
|  | 2226 | if (orig_st->nents == orig_st->orig_nents) | 
|  | 2227 | return; | 
|  | 2228 |  | 
| Chris Wilson | 8bfc478f | 2016-12-23 14:57:58 +0000 | [diff] [blame] | 2229 | if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) | 
| Tvrtko Ursulin | 0c40ce1 | 2016-11-09 15:13:43 +0000 | [diff] [blame] | 2230 | return; | 
|  | 2231 |  | 
|  | 2232 | new_sg = new_st.sgl; | 
|  | 2233 | for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { | 
|  | 2234 | sg_set_page(new_sg, sg_page(sg), sg->length, 0); | 
|  | 2235 | /* called before being DMA mapped, no need to copy sg->dma_* */ | 
|  | 2236 | new_sg = sg_next(new_sg); | 
|  | 2237 | } | 
| Chris Wilson | c2dc6cc | 2016-12-19 12:43:46 +0000 | [diff] [blame] | 2238 | GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ | 
| Tvrtko Ursulin | 0c40ce1 | 2016-11-09 15:13:43 +0000 | [diff] [blame] | 2239 |  | 
|  | 2240 | sg_free_table(orig_st); | 
|  | 2241 |  | 
|  | 2242 | *orig_st = new_st; | 
|  | 2243 | } | 
|  | 2244 |  | 
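|  |  | /* Worked example (illustrative): a 4096-page object whose pages happen | 
|  |  |  * to coalesce into 3 contiguous runs leaves orig_nents == 4096 but | 
|  |  |  * nents == 3; the copy above rebuilds the table with only the 3 used | 
|  |  |  * entries, releasing the rest. | 
|  |  |  */ | 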
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2245 | static struct sg_table * | 
| Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 2246 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2247 | { | 
| Chris Wilson | fac5e23 | 2016-07-04 11:34:36 +0100 | [diff] [blame] | 2248 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); | 
| Chris Wilson | d766ef5 | 2016-12-19 12:43:45 +0000 | [diff] [blame] | 2249 | const unsigned long page_count = obj->base.size / PAGE_SIZE; | 
|  | 2250 | unsigned long i; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2251 | struct address_space *mapping; | 
| Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 2252 | struct sg_table *st; | 
|  | 2253 | struct scatterlist *sg; | 
| Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2254 | struct sgt_iter sgt_iter; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2255 | struct page *page; | 
| Imre Deak | 90797e6 | 2013-02-18 19:28:03 +0200 | [diff] [blame] | 2256 | unsigned long last_pfn = 0;	/* suppress gcc warning */ | 
| Chris Wilson | 4ff340f0 | 2016-10-18 13:02:50 +0100 | [diff] [blame] | 2257 | unsigned int max_segment; | 
| Imre Deak | e227330 | 2015-07-09 12:59:05 +0300 | [diff] [blame] | 2258 | int ret; | 
| Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 2259 | gfp_t gfp; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2260 |  | 
| Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 2261 | /* Assert that the object is not currently in any GPU domain. As it | 
|  | 2262 | * wasn't in the GTT, there shouldn't be any way it could have been in | 
|  | 2263 | * a GPU cache | 
|  | 2264 | */ | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2265 | GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); | 
|  | 2266 | GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); | 
| Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 2267 |  | 
| Konrad Rzeszutek Wilk | 7453c54 | 2016-12-20 10:02:02 -0500 | [diff] [blame] | 2268 | max_segment = swiotlb_max_segment(); | 
| Chris Wilson | 871dfbd | 2016-10-11 09:20:21 +0100 | [diff] [blame] | 2269 | if (!max_segment) | 
| Chris Wilson | 4ff340f0 | 2016-10-18 13:02:50 +0100 | [diff] [blame] | 2270 | max_segment = rounddown(UINT_MAX, PAGE_SIZE); | 
| Chris Wilson | 871dfbd | 2016-10-11 09:20:21 +0100 | [diff] [blame] | 2271 |  | 
| Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 2272 | st = kmalloc(sizeof(*st), GFP_KERNEL); | 
|  | 2273 | if (st == NULL) | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2274 | return ERR_PTR(-ENOMEM); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2275 |  | 
| Chris Wilson | d766ef5 | 2016-12-19 12:43:45 +0000 | [diff] [blame] | 2276 | rebuild_st: | 
| Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 2277 | if (sg_alloc_table(st, page_count, GFP_KERNEL)) { | 
| Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 2278 | kfree(st); | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2279 | return ERR_PTR(-ENOMEM); | 
| Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 2280 | } | 
|  | 2281 |  | 
|  | 2282 | /* Get the list of pages out of our struct file.  They'll be pinned | 
|  | 2283 | * at this point until we release them. | 
|  | 2284 | * | 
|  | 2285 | * Fail silently without starting the shrinker | 
|  | 2286 | */ | 
| Al Viro | 93c76a3 | 2015-12-04 23:45:44 -0500 | [diff] [blame] | 2287 | mapping = obj->base.filp->f_mapping; | 
| Michal Hocko | c62d255 | 2015-11-06 16:28:49 -0800 | [diff] [blame] | 2288 | gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); | 
| Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 2289 | gfp |= __GFP_NORETRY | __GFP_NOWARN; | 
| Imre Deak | 90797e6 | 2013-02-18 19:28:03 +0200 | [diff] [blame] | 2290 | sg = st->sgl; | 
|  | 2291 | st->nents = 0; | 
|  | 2292 | for (i = 0; i < page_count; i++) { | 
| Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 2293 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | 
|  | 2294 | if (IS_ERR(page)) { | 
| Chris Wilson | 21ab4e7 | 2014-09-09 11:16:08 +0100 | [diff] [blame] | 2295 | i915_gem_shrink(dev_priv, | 
|  | 2296 | page_count, | 
|  | 2297 | I915_SHRINK_BOUND | | 
|  | 2298 | I915_SHRINK_UNBOUND | | 
|  | 2299 | I915_SHRINK_PURGEABLE); | 
| Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 2300 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | 
|  | 2301 | } | 
|  | 2302 | if (IS_ERR(page)) { | 
|  | 2303 | /* We've tried hard to allocate the memory by reaping | 
|  | 2304 | * our own buffers; now let the real VM do its job and | 
|  | 2305 | * go down in flames if we are truly OOM. | 
|  | 2306 | */ | 
| David Herrmann | f461d1b | 2014-05-25 14:34:10 +0200 | [diff] [blame] | 2307 | page = shmem_read_mapping_page(mapping, i); | 
| Imre Deak | e227330 | 2015-07-09 12:59:05 +0300 | [diff] [blame] | 2308 | if (IS_ERR(page)) { | 
|  | 2309 | ret = PTR_ERR(page); | 
| Chris Wilson | b17993b | 2016-11-14 11:29:30 +0000 | [diff] [blame] | 2310 | goto err_sg; | 
| Imre Deak | e227330 | 2015-07-09 12:59:05 +0300 | [diff] [blame] | 2311 | } | 
| Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 2312 | } | 
| Chris Wilson | 871dfbd | 2016-10-11 09:20:21 +0100 | [diff] [blame] | 2313 | if (!i || | 
|  | 2314 | sg->length >= max_segment || | 
|  | 2315 | page_to_pfn(page) != last_pfn + 1) { | 
| Imre Deak | 90797e6 | 2013-02-18 19:28:03 +0200 | [diff] [blame] | 2316 | if (i) | 
|  | 2317 | sg = sg_next(sg); | 
|  | 2318 | st->nents++; | 
|  | 2319 | sg_set_page(sg, page, PAGE_SIZE, 0); | 
|  | 2320 | } else { | 
|  | 2321 | sg->length += PAGE_SIZE; | 
|  | 2322 | } | 
|  | 2323 | last_pfn = page_to_pfn(page); | 
| Daniel Vetter | 3bbbe70 | 2013-10-07 17:15:45 -0300 | [diff] [blame] | 2324 |  | 
|  | 2325 | /* Check that the i965g/gm workaround works. */ | 
|  | 2326 | WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2327 | } | 
| Chris Wilson | 871dfbd | 2016-10-11 09:20:21 +0100 | [diff] [blame] | 2328 | if (sg) /* loop terminated early; short sg table */ | 
| Konrad Rzeszutek Wilk | 426729d | 2013-06-24 11:47:48 -0400 | [diff] [blame] | 2329 | sg_mark_end(sg); | 
| Chris Wilson | 74ce6b6 | 2012-10-19 15:51:06 +0100 | [diff] [blame] | 2330 |  | 
| Tvrtko Ursulin | 0c40ce1 | 2016-11-09 15:13:43 +0000 | [diff] [blame] | 2331 | /* Trim unused sg entries to avoid wasting memory. */ | 
|  | 2332 | i915_sg_trim(st); | 
|  | 2333 |  | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2334 | ret = i915_gem_gtt_prepare_pages(obj, st); | 
| Chris Wilson | d766ef5 | 2016-12-19 12:43:45 +0000 | [diff] [blame] | 2335 | if (ret) { | 
|  | 2336 | /* DMA remapping failed? One possible cause is that | 
|  | 2337 | * it could not reserve enough large entries; asking | 
|  | 2338 | * for PAGE_SIZE chunks instead may be helpful. | 
|  | 2339 | */ | 
|  | 2340 | if (max_segment > PAGE_SIZE) { | 
|  | 2341 | for_each_sgt_page(page, sgt_iter, st) | 
|  | 2342 | put_page(page); | 
|  | 2343 | sg_free_table(st); | 
|  | 2344 |  | 
|  | 2345 | max_segment = PAGE_SIZE; | 
|  | 2346 | goto rebuild_st; | 
|  | 2347 | } else { | 
|  | 2348 | dev_warn(&dev_priv->drm.pdev->dev, | 
|  | 2349 | "Failed to DMA remap %lu pages\n", | 
|  | 2350 | page_count); | 
|  | 2351 | goto err_pages; | 
|  | 2352 | } | 
|  | 2353 | } | 
| Imre Deak | e227330 | 2015-07-09 12:59:05 +0300 | [diff] [blame] | 2354 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2355 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2356 | i915_gem_object_do_bit_17_swizzle(obj, st); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2357 |  | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2358 | return st; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2359 |  | 
| Chris Wilson | b17993b | 2016-11-14 11:29:30 +0000 | [diff] [blame] | 2360 | err_sg: | 
| Imre Deak | 90797e6 | 2013-02-18 19:28:03 +0200 | [diff] [blame] | 2361 | sg_mark_end(sg); | 
| Chris Wilson | b17993b | 2016-11-14 11:29:30 +0000 | [diff] [blame] | 2362 | err_pages: | 
| Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2363 | for_each_sgt_page(page, sgt_iter, st) | 
|  | 2364 | put_page(page); | 
| Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 2365 | sg_free_table(st); | 
|  | 2366 | kfree(st); | 
| Chris Wilson | 0820baf | 2014-03-25 13:23:03 +0000 | [diff] [blame] | 2367 |  | 
|  | 2368 | /* shmemfs first checks if there is enough memory to allocate the page | 
|  | 2369 | * and reports ENOSPC should there be insufficient, along with the usual | 
|  | 2370 | * ENOMEM for a genuine allocation failure. | 
|  | 2371 | * | 
|  | 2372 | * We use ENOSPC in our driver to mean that we have run out of aperture | 
|  | 2373 | * space and so want to translate the error from shmemfs back to our | 
|  | 2374 | * usual understanding of ENOMEM. | 
|  | 2375 | */ | 
| Imre Deak | e227330 | 2015-07-09 12:59:05 +0300 | [diff] [blame] | 2376 | if (ret == -ENOSPC) | 
|  | 2377 | ret = -ENOMEM; | 
|  | 2378 |  | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2379 | return ERR_PTR(ret); | 
|  | 2380 | } | 
|  | 2381 |  | 
|  | 2382 | void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, | 
|  | 2383 | struct sg_table *pages) | 
|  | 2384 | { | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2385 | lockdep_assert_held(&obj->mm.lock); | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2386 |  | 
|  | 2387 | obj->mm.get_page.sg_pos = pages->sgl; | 
|  | 2388 | obj->mm.get_page.sg_idx = 0; | 
|  | 2389 |  | 
|  | 2390 | obj->mm.pages = pages; | 
| Chris Wilson | 2c3a3f4 | 2016-11-04 10:30:01 +0000 | [diff] [blame] | 2391 |  | 
|  | 2392 | if (i915_gem_object_is_tiled(obj) && | 
|  | 2393 | to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) { | 
|  | 2394 | GEM_BUG_ON(obj->mm.quirked); | 
|  | 2395 | __i915_gem_object_pin_pages(obj); | 
|  | 2396 | obj->mm.quirked = true; | 
|  | 2397 | } | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2398 | } | 
|  | 2399 |  | 
|  | 2400 | static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) | 
|  | 2401 | { | 
|  | 2402 | struct sg_table *pages; | 
|  | 2403 |  | 
| Chris Wilson | 2c3a3f4 | 2016-11-04 10:30:01 +0000 | [diff] [blame] | 2404 | GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); | 
|  | 2405 |  | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2406 | if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { | 
|  | 2407 | DRM_DEBUG("Attempting to obtain a purgeable object\n"); | 
|  | 2408 | return -EFAULT; | 
|  | 2409 | } | 
|  | 2410 |  | 
|  | 2411 | pages = obj->ops->get_pages(obj); | 
|  | 2412 | if (unlikely(IS_ERR(pages))) | 
|  | 2413 | return PTR_ERR(pages); | 
|  | 2414 |  | 
|  | 2415 | __i915_gem_object_set_pages(obj, pages); | 
|  | 2416 | return 0; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2417 | } | 
|  | 2418 |  | 
| Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 2419 | /* Ensure that the associated pages are gathered from the backing storage | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2420 | * and pinned into our object. i915_gem_object_pin_pages() may be called | 
| Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 2421 | * multiple times before the pages are released by a matching number of | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2422 | * calls to i915_gem_object_unpin_pages() - once the pages are no longer | 
| Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 2423 | * referenced, either as a result of memory pressure (reaping pages under | 
|  | 2424 | * the shrinker) or as the object is itself released. | 
|  | 2425 | */ | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2426 | int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) | 
| Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 2427 | { | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2428 | int err; | 
| Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 2429 |  | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2430 | err = mutex_lock_interruptible(&obj->mm.lock); | 
|  | 2431 | if (err) | 
|  | 2432 | return err; | 
| Chris Wilson | 4c7d62c | 2016-10-28 13:58:32 +0100 | [diff] [blame] | 2433 |  | 
| Chris Wilson | 2c3a3f4 | 2016-11-04 10:30:01 +0000 | [diff] [blame] | 2434 | if (unlikely(!obj->mm.pages)) { | 
|  | 2435 | err = ____i915_gem_object_get_pages(obj); | 
|  | 2436 | if (err) | 
|  | 2437 | goto unlock; | 
|  | 2438 |  | 
|  | 2439 | smp_mb__before_atomic(); | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2440 | } | 
| Chris Wilson | 2c3a3f4 | 2016-11-04 10:30:01 +0000 | [diff] [blame] | 2441 | atomic_inc(&obj->mm.pages_pin_count); | 
| Chris Wilson | 43e28f0 | 2013-01-08 10:53:09 +0000 | [diff] [blame] | 2442 |  | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2443 | unlock: | 
|  | 2444 | mutex_unlock(&obj->mm.lock); | 
| Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2445 | return err; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2446 | } | 
|  | 2447 |  | 
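|  |  | /* Illustrative sketch (an assumption of this note, not a caller in this | 
|  |  |  * file): pins nest, so each successful pin must be balanced by an unpin | 
|  |  |  * before the shrinker may reap the backing store. | 
|  |  |  * | 
|  |  |  *	int err = i915_gem_object_pin_pages(obj); | 
|  |  |  *	if (err) | 
|  |  |  *		return err; | 
|  |  |  *	... obj->mm.pages may now be dereferenced safely ... | 
|  |  |  *	i915_gem_object_unpin_pages(obj); | 
|  |  |  */ | 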
| Dave Gordon | dd6034c | 2016-05-20 11:54:04 +0100 | [diff] [blame] | 2448 | /* The 'mapping' part of i915_gem_object_pin_map() below */ | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2449 | static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, | 
|  | 2450 | enum i915_map_type type) | 
| Dave Gordon | dd6034c | 2016-05-20 11:54:04 +0100 | [diff] [blame] | 2451 | { | 
|  | 2452 | unsigned long n_pages = obj->base.size >> PAGE_SHIFT; | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2453 | struct sg_table *sgt = obj->mm.pages; | 
| Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2454 | struct sgt_iter sgt_iter; | 
|  | 2455 | struct page *page; | 
| Dave Gordon | b338fa4 | 2016-05-20 11:54:05 +0100 | [diff] [blame] | 2456 | struct page *stack_pages[32]; | 
|  | 2457 | struct page **pages = stack_pages; | 
| Dave Gordon | dd6034c | 2016-05-20 11:54:04 +0100 | [diff] [blame] | 2458 | unsigned long i = 0; | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2459 | pgprot_t pgprot; | 
| Dave Gordon | dd6034c | 2016-05-20 11:54:04 +0100 | [diff] [blame] | 2460 | void *addr; | 
|  | 2461 |  | 
|  | 2462 | /* A single page can always be kmapped */ | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2463 | if (n_pages == 1 && type == I915_MAP_WB) | 
| Dave Gordon | dd6034c | 2016-05-20 11:54:04 +0100 | [diff] [blame] | 2464 | return kmap(sg_page(sgt->sgl)); | 
|  | 2465 |  | 
| Dave Gordon | b338fa4 | 2016-05-20 11:54:05 +0100 | [diff] [blame] | 2466 | if (n_pages > ARRAY_SIZE(stack_pages)) { | 
|  | 2467 | /* Too big for stack -- allocate temporary array instead */ | 
|  | 2468 | pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY); | 
|  | 2469 | if (!pages) | 
|  | 2470 | return NULL; | 
|  | 2471 | } | 
| Dave Gordon | dd6034c | 2016-05-20 11:54:04 +0100 | [diff] [blame] | 2472 |  | 
| Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2473 | for_each_sgt_page(page, sgt_iter, sgt) | 
|  | 2474 | pages[i++] = page; | 
| Dave Gordon | dd6034c | 2016-05-20 11:54:04 +0100 | [diff] [blame] | 2475 |  | 
|  | 2476 | /* Check that we have the expected number of pages */ | 
|  | 2477 | GEM_BUG_ON(i != n_pages); | 
|  | 2478 |  | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2479 | switch (type) { | 
|  | 2480 | case I915_MAP_WB: | 
|  | 2481 | pgprot = PAGE_KERNEL; | 
|  | 2482 | break; | 
|  | 2483 | case I915_MAP_WC: | 
|  | 2484 | pgprot = pgprot_writecombine(PAGE_KERNEL_IO); | 
|  | 2485 | break; | 
|  | 2486 | } | 
|  | 2487 | addr = vmap(pages, n_pages, 0, pgprot); | 
| Dave Gordon | dd6034c | 2016-05-20 11:54:04 +0100 | [diff] [blame] | 2488 |  | 
| Dave Gordon | b338fa4 | 2016-05-20 11:54:05 +0100 | [diff] [blame] | 2489 | if (pages != stack_pages) | 
|  | 2490 | drm_free_large(pages); | 
| Dave Gordon | dd6034c | 2016-05-20 11:54:04 +0100 | [diff] [blame] | 2491 |  | 
|  | 2492 | return addr; | 
|  | 2493 | } | 
|  | 2494 |  | 
|  | 2495 | /* get, pin, and map the pages of the object into kernel space */ | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2496 | void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, | 
|  | 2497 | enum i915_map_type type) | 
| Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 2498 | { | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2499 | enum i915_map_type has_type; | 
|  | 2500 | bool pinned; | 
|  | 2501 | void *ptr; | 
| Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 2502 | int ret; | 
|  | 2503 |  | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2504 | GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); | 
| Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 2505 |  | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2506 | ret = mutex_lock_interruptible(&obj->mm.lock); | 
| Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 2507 | if (ret) | 
|  | 2508 | return ERR_PTR(ret); | 
|  | 2509 |  | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2510 | pinned = true; | 
|  | 2511 | if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { | 
| Chris Wilson | 2c3a3f4 | 2016-11-04 10:30:01 +0000 | [diff] [blame] | 2512 | if (unlikely(!obj->mm.pages)) { | 
|  | 2513 | ret = ____i915_gem_object_get_pages(obj); | 
|  | 2514 | if (ret) | 
|  | 2515 | goto err_unlock; | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2516 |  | 
| Chris Wilson | 2c3a3f4 | 2016-11-04 10:30:01 +0000 | [diff] [blame] | 2517 | smp_mb__before_atomic(); | 
|  | 2518 | } | 
|  | 2519 | atomic_inc(&obj->mm.pages_pin_count); | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2520 | pinned = false; | 
|  | 2521 | } | 
|  | 2522 | GEM_BUG_ON(!obj->mm.pages); | 
| Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 2523 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2524 | ptr = ptr_unpack_bits(obj->mm.mapping, has_type); | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2525 | if (ptr && has_type != type) { | 
|  | 2526 | if (pinned) { | 
|  | 2527 | ret = -EBUSY; | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2528 | goto err_unpin; | 
| Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 2529 | } | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2530 |  | 
|  | 2531 | if (is_vmalloc_addr(ptr)) | 
|  | 2532 | vunmap(ptr); | 
|  | 2533 | else | 
|  | 2534 | kunmap(kmap_to_page(ptr)); | 
|  | 2535 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2536 | ptr = obj->mm.mapping = NULL; | 
| Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 2537 | } | 
|  | 2538 |  | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2539 | if (!ptr) { | 
|  | 2540 | ptr = i915_gem_object_map(obj, type); | 
|  | 2541 | if (!ptr) { | 
|  | 2542 | ret = -ENOMEM; | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2543 | goto err_unpin; | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2544 | } | 
|  | 2545 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 2546 | obj->mm.mapping = ptr_pack_bits(ptr, type); | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2547 | } | 
|  | 2548 |  | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2549 | out_unlock: | 
|  | 2550 | mutex_unlock(&obj->mm.lock); | 
| Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 2551 | return ptr; | 
|  | 2552 |  | 
| Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 2553 | err_unpin: | 
|  | 2554 | atomic_dec(&obj->mm.pages_pin_count); | 
|  | 2555 | err_unlock: | 
|  | 2556 | ptr = ERR_PTR(ret); | 
|  | 2557 | goto out_unlock; | 
| Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 2558 | } | 
|  | 2559 |  | 
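The mm.mapping field above stores the kmap/vmap pointer with the map type packed into the pointer's unused low bits (ptr_pack_bits()/ptr_unpack_bits()). A minimal sketch of that technique, assuming at least 4-byte pointer alignment; the helper names below are illustrative, not the driver's:

#include <assert.h>
#include <stdint.h>

#define LOW_BITS 0x3ul	/* tag space freed up by 4-byte alignment */

static inline void *pack_ptr_bits(void *ptr, unsigned long bits)
{
	assert(((uintptr_t)ptr & LOW_BITS) == 0);	/* pointer must be aligned */
	assert((bits & ~LOW_BITS) == 0);		/* tag must fit in two bits */
	return (void *)((uintptr_t)ptr | bits);
}

static inline void *unpack_ptr_bits(void *packed, unsigned long *bits)
{
	*bits = (uintptr_t)packed & LOW_BITS;
	return (void *)((uintptr_t)packed & ~LOW_BITS);
}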
| Chris Wilson | 6095868 | 2016-12-31 11:20:11 +0000 | [diff] [blame] | 2560 | static bool ban_context(const struct i915_gem_context *ctx) | 
| Mika Kuoppala | be62acb | 2013-08-30 16:19:28 +0300 | [diff] [blame] | 2561 | { | 
| Chris Wilson | 6095868 | 2016-12-31 11:20:11 +0000 | [diff] [blame] | 2562 | return (i915_gem_context_is_bannable(ctx) && | 
|  | 2563 | ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD); | 
| Mika Kuoppala | be62acb | 2013-08-30 16:19:28 +0300 | [diff] [blame] | 2564 | } | 
|  | 2565 |  | 
| Mika Kuoppala | e5e1fc4 | 2016-11-16 17:20:31 +0200 | [diff] [blame] | 2566 | static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) | 
| Mika Kuoppala | aa60c66 | 2013-06-12 15:13:20 +0300 | [diff] [blame] | 2567 | { | 
| Mika Kuoppala | bc1d53c | 2016-11-16 17:20:34 +0200 | [diff] [blame] | 2568 | ctx->guilty_count++; | 
| Chris Wilson | 6095868 | 2016-12-31 11:20:11 +0000 | [diff] [blame] | 2569 | ctx->ban_score += CONTEXT_SCORE_GUILTY; | 
|  | 2570 | if (ban_context(ctx)) | 
|  | 2571 | i915_gem_context_set_banned(ctx); | 
| Mika Kuoppala | b083a08 | 2016-11-18 15:10:47 +0200 | [diff] [blame] | 2572 |  | 
|  | 2573 | DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n", | 
| Mika Kuoppala | bc1d53c | 2016-11-16 17:20:34 +0200 | [diff] [blame] | 2574 | ctx->name, ctx->ban_score, | 
| Chris Wilson | 6095868 | 2016-12-31 11:20:11 +0000 | [diff] [blame] | 2575 | yesno(i915_gem_context_is_banned(ctx))); | 
| Mika Kuoppala | b083a08 | 2016-11-18 15:10:47 +0200 | [diff] [blame] | 2576 |  | 
| Chris Wilson | 6095868 | 2016-12-31 11:20:11 +0000 | [diff] [blame] | 2577 | if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv)) | 
| Mika Kuoppala | b083a08 | 2016-11-18 15:10:47 +0200 | [diff] [blame] | 2578 | return; | 
|  | 2579 |  | 
| Chris Wilson | d9e9da6 | 2016-11-22 14:41:18 +0000 | [diff] [blame] | 2580 | ctx->file_priv->context_bans++; | 
|  | 2581 | DRM_DEBUG_DRIVER("client %s has had %d context banned\n", | 
|  | 2582 | ctx->name, ctx->file_priv->context_bans); | 
| Mika Kuoppala | e5e1fc4 | 2016-11-16 17:20:31 +0200 | [diff] [blame] | 2583 | } | 
|  | 2584 |  | 
|  | 2585 | static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) | 
|  | 2586 | { | 
| Mika Kuoppala | bc1d53c | 2016-11-16 17:20:34 +0200 | [diff] [blame] | 2587 | ctx->active_count++; | 
| Mika Kuoppala | aa60c66 | 2013-06-12 15:13:20 +0300 | [diff] [blame] | 2588 | } | 
|  | 2589 |  | 
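The ban policy above is a plain accumulating score: every hang a context is found guilty of bumps ban_score by CONTEXT_SCORE_GUILTY, and crossing CONTEXT_SCORE_BAN_THRESHOLD bans a bannable context. A reduced sketch of the arithmetic; the constant values here are illustrative, the real ones live in the driver's headers:

#include <stdbool.h>

#define SCORE_GUILTY		10	/* illustrative, not the driver's value */
#define SCORE_BAN_THRESHOLD	40	/* illustrative, not the driver's value */

struct toy_ctx {
	int guilty_count;
	int ban_score;
	bool bannable;
	bool banned;
};

static void toy_mark_guilty(struct toy_ctx *ctx)
{
	ctx->guilty_count++;
	ctx->ban_score += SCORE_GUILTY;
	/* With these values, the fourth guilty hang trips the ban. */
	if (ctx->bannable && ctx->ban_score >= SCORE_BAN_THRESHOLD)
		ctx->banned = true;
}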
| Chris Wilson | 8d9fc7f | 2014-02-25 17:11:23 +0200 | [diff] [blame] | 2590 | struct drm_i915_gem_request * | 
| Tvrtko Ursulin | 0bc40be | 2016-03-16 11:00:37 +0000 | [diff] [blame] | 2591 | i915_gem_find_active_request(struct intel_engine_cs *engine) | 
| Chris Wilson | 9375e44 | 2010-09-19 12:21:28 +0100 | [diff] [blame] | 2592 | { | 
| Chris Wilson | 4db080f | 2013-12-04 11:37:09 +0000 | [diff] [blame] | 2593 | struct drm_i915_gem_request *request; | 
| Mika Kuoppala | aa60c66 | 2013-06-12 15:13:20 +0300 | [diff] [blame] | 2594 |  | 
| Chris Wilson | f69a02c | 2016-07-01 17:23:16 +0100 | [diff] [blame] | 2595 | /* We are called by the error capture and reset at a random | 
|  | 2596 | * point in time. In particular, note that neither is crucially | 
|  | 2597 | * ordered with an interrupt. After a hang, the GPU is dead and we | 
|  | 2598 | * assume that no more writes can happen (we waited long enough for | 
|  | 2599 | * all writes that were in transit to be flushed) - adding an | 
|  | 2600 | * extra delay for a recent interrupt is pointless. Hence, we do | 
|  | 2601 | * not need an engine->irq_seqno_barrier() before the seqno reads. | 
|  | 2602 | */ | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 2603 | list_for_each_entry(request, &engine->timeline->requests, link) { | 
| Chris Wilson | 80b204b | 2016-10-28 13:58:58 +0100 | [diff] [blame] | 2604 | if (__i915_gem_request_completed(request)) | 
| Chris Wilson | 4db080f | 2013-12-04 11:37:09 +0000 | [diff] [blame] | 2605 | continue; | 
| Mika Kuoppala | aa60c66 | 2013-06-12 15:13:20 +0300 | [diff] [blame] | 2606 |  | 
| Mika Kuoppala | 36193ac | 2017-01-17 17:59:02 +0200 | [diff] [blame] | 2607 | GEM_BUG_ON(request->engine != engine); | 
| Mika Kuoppala | b6b0fac | 2014-01-30 19:04:43 +0200 | [diff] [blame] | 2608 | return request; | 
| Chris Wilson | 4db080f | 2013-12-04 11:37:09 +0000 | [diff] [blame] | 2609 | } | 
| Mika Kuoppala | b6b0fac | 2014-01-30 19:04:43 +0200 | [diff] [blame] | 2610 |  | 
|  | 2611 | return NULL; | 
|  | 2612 | } | 
|  | 2613 |  | 
| Mika Kuoppala | bf2f043 | 2017-01-17 17:59:04 +0200 | [diff] [blame] | 2614 | static bool engine_stalled(struct intel_engine_cs *engine) | 
|  | 2615 | { | 
|  | 2616 | if (!engine->hangcheck.stalled) | 
|  | 2617 | return false; | 
|  | 2618 |  | 
|  | 2619 | /* Check for possible seqno movement after hang declaration */ | 
|  | 2620 | if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { | 
|  | 2621 | DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); | 
|  | 2622 | return false; | 
|  | 2623 | } | 
|  | 2624 |  | 
|  | 2625 | return true; | 
|  | 2626 | } | 
|  | 2627 |  | 
| Chris Wilson | 0e178ae | 2017-01-17 17:59:06 +0200 | [diff] [blame] | 2628 | int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) | 
| Chris Wilson | 4c96554 | 2017-01-17 17:59:01 +0200 | [diff] [blame] | 2629 | { | 
|  | 2630 | struct intel_engine_cs *engine; | 
|  | 2631 | enum intel_engine_id id; | 
| Chris Wilson | 0e178ae | 2017-01-17 17:59:06 +0200 | [diff] [blame] | 2632 | int err = 0; | 
| Chris Wilson | 4c96554 | 2017-01-17 17:59:01 +0200 | [diff] [blame] | 2633 |  | 
|  | 2634 | /* Ensure the irq handler finishes, and is not run again. */ | 
| Chris Wilson | 0e178ae | 2017-01-17 17:59:06 +0200 | [diff] [blame] | 2635 | for_each_engine(engine, dev_priv, id) { | 
|  | 2636 | struct drm_i915_gem_request *request; | 
|  | 2637 |  | 
| Chris Wilson | 4c96554 | 2017-01-17 17:59:01 +0200 | [diff] [blame] | 2638 | tasklet_kill(&engine->irq_tasklet); | 
|  | 2639 |  | 
| Chris Wilson | 0e178ae | 2017-01-17 17:59:06 +0200 | [diff] [blame] | 2640 | if (engine_stalled(engine)) { | 
|  | 2641 | request = i915_gem_find_active_request(engine); | 
|  | 2642 | if (request && request->fence.error == -EIO) | 
|  | 2643 | err = -EIO; /* Previous reset failed! */ | 
|  | 2644 | } | 
|  | 2645 | } | 
|  | 2646 |  | 
| Chris Wilson | 4c96554 | 2017-01-17 17:59:01 +0200 | [diff] [blame] | 2647 | i915_gem_revoke_fences(dev_priv); | 
| Chris Wilson | 0e178ae | 2017-01-17 17:59:06 +0200 | [diff] [blame] | 2648 |  | 
|  | 2649 | return err; | 
| Chris Wilson | 4c96554 | 2017-01-17 17:59:01 +0200 | [diff] [blame] | 2650 | } | 
|  | 2651 |  | 
| Mika Kuoppala | 36193ac | 2017-01-17 17:59:02 +0200 | [diff] [blame] | 2652 | static void skip_request(struct drm_i915_gem_request *request) | 
| Mika Kuoppala | b6b0fac | 2014-01-30 19:04:43 +0200 | [diff] [blame] | 2653 | { | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2654 | void *vaddr = request->ring->vaddr; | 
|  | 2655 | u32 head; | 
| Mika Kuoppala | b6b0fac | 2014-01-30 19:04:43 +0200 | [diff] [blame] | 2656 |  | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2657 | /* As this request likely depends on state from the lost | 
|  | 2658 | * context, clear out all the user operations leaving the | 
|  | 2659 | * breadcrumb at the end (so we get the fence notifications). | 
|  | 2660 | */ | 
|  | 2661 | head = request->head; | 
|  | 2662 | if (request->postfix < head) { | 
|  | 2663 | memset(vaddr + head, 0, request->ring->size - head); | 
|  | 2664 | head = 0; | 
|  | 2665 | } | 
|  | 2666 | memset(vaddr + head, 0, request->postfix - head); | 
| Chris Wilson | c0d5f32 | 2017-01-10 17:22:43 +0000 | [diff] [blame] | 2667 |  | 
|  | 2668 | dma_fence_set_error(&request->fence, -EIO); | 
| Chris Wilson | 4db080f | 2013-12-04 11:37:09 +0000 | [diff] [blame] | 2669 | } | 
|  | 2670 |  | 
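skip_request() above zeroes the hung request's payload out of a circular ring, leaving only the breadcrumb; when the payload wraps past the end of the buffer the clear is done in two spans. A standalone sketch of that wrap handling (toy_ring is a hypothetical stand-in for the driver's ring):

#include <string.h>

struct toy_ring {
	char	*vaddr;
	size_t	size;
};

/* Zero the bytes from head up to tail in a circular buffer. */
static void clear_ring_span(struct toy_ring *ring, size_t head, size_t tail)
{
	if (tail < head) {	/* the span wraps past the end of the ring */
		memset(ring->vaddr + head, 0, ring->size - head);
		head = 0;
	}
	memset(ring->vaddr + head, 0, tail - head);
}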
| Mika Kuoppala | 36193ac | 2017-01-17 17:59:02 +0200 | [diff] [blame] | 2671 | static void engine_skip_context(struct drm_i915_gem_request *request) | 
|  | 2672 | { | 
|  | 2673 | struct intel_engine_cs *engine = request->engine; | 
|  | 2674 | struct i915_gem_context *hung_ctx = request->ctx; | 
|  | 2675 | struct intel_timeline *timeline; | 
|  | 2676 | unsigned long flags; | 
|  | 2677 |  | 
|  | 2678 | timeline = i915_gem_context_lookup_timeline(hung_ctx, engine); | 
|  | 2679 |  | 
|  | 2680 | spin_lock_irqsave(&engine->timeline->lock, flags); | 
|  | 2681 | spin_lock(&timeline->lock); | 
|  | 2682 |  | 
|  | 2683 | list_for_each_entry_continue(request, &engine->timeline->requests, link) | 
|  | 2684 | if (request->ctx == hung_ctx) | 
|  | 2685 | skip_request(request); | 
|  | 2686 |  | 
|  | 2687 | list_for_each_entry(request, &timeline->requests, link) | 
|  | 2688 | skip_request(request); | 
|  | 2689 |  | 
|  | 2690 | spin_unlock(&timeline->lock); | 
|  | 2691 | spin_unlock_irqrestore(&engine->timeline->lock, flags); | 
|  | 2692 | } | 
|  | 2693 |  | 
| Mika Kuoppala | 61da536 | 2017-01-17 17:59:05 +0200 | [diff] [blame] | 2694 | /* Returns true if the request was guilty of hang */ | 
|  | 2695 | static bool i915_gem_reset_request(struct drm_i915_gem_request *request) | 
|  | 2696 | { | 
|  | 2697 | /* Read once and return the resolution */ | 
|  | 2698 | const bool guilty = engine_stalled(request->engine); | 
|  | 2699 |  | 
| Mika Kuoppala | 71895a0 | 2017-01-17 17:59:07 +0200 | [diff] [blame] | 2700 | /* The guilty request will get skipped on a hung engine. | 
|  | 2701 | * | 
|  | 2702 | * Users of client default contexts do not rely on logical | 
|  | 2703 | * state preserved between batches so it is safe to execute | 
|  | 2704 | * queued requests following the hang. Non-default contexts | 
|  | 2705 | * rely on preserved state, so skipping a batch loses the | 
|  | 2706 | * evolution of the state and it needs to be considered corrupted. | 
|  | 2707 | * Executing more queued batches on top of corrupted state is | 
|  | 2708 | * risky. But we take the risk by trying to advance through | 
|  | 2709 | * the queued requests in order to make the client behaviour | 
|  | 2710 | * more predictable around resets, by not throwing away a random | 
|  | 2711 | * number of batches it has prepared for execution. Sophisticated | 
|  | 2712 | * clients can use gem_reset_stats_ioctl and dma fence status | 
|  | 2713 | * (exported via sync_file info ioctl on explicit fences) to observe | 
|  | 2714 | * when they lose the context state and should rebuild accordingly. | 
|  | 2715 | * | 
|  | 2716 | * The context ban and, ultimately, the client ban mechanisms are safety | 
|  | 2717 | * valves if client submission ends up resulting in nothing more than | 
|  | 2718 | * subsequent hangs. | 
|  | 2719 | */ | 
|  | 2720 |  | 
| Mika Kuoppala | 61da536 | 2017-01-17 17:59:05 +0200 | [diff] [blame] | 2721 | if (guilty) { | 
|  | 2722 | i915_gem_context_mark_guilty(request->ctx); | 
|  | 2723 | skip_request(request); | 
|  | 2724 | } else { | 
|  | 2725 | i915_gem_context_mark_innocent(request->ctx); | 
|  | 2726 | dma_fence_set_error(&request->fence, -EAGAIN); | 
|  | 2727 | } | 
|  | 2728 |  | 
|  | 2729 | return guilty; | 
|  | 2730 | } | 
|  | 2731 |  | 
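The outcome above is visible to user space through the fence status: a guilty, skipped request completes with -EIO, while an innocent one is tagged -EAGAIN before being replayed. A hedged user-space sketch of reading that status back from an explicit fence fd via the sync_file info ioctl:

#include <linux/sync_file.h>
#include <string.h>
#include <sys/ioctl.h>

/* Returns the fence status: <0 carries the driver's error (-EIO for a
 * skipped guilty request, -EAGAIN for an innocent, replayed one),
 * 0 means still active, 1 means signalled. */
static int fence_status(int fence_fd)
{
	struct sync_file_info info;

	memset(&info, 0, sizeof(info));
	if (ioctl(fence_fd, SYNC_IOC_FILE_INFO, &info) < 0)
		return -1;	/* ioctl itself failed; see errno */

	return info.status;
}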
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2732 | static void i915_gem_reset_engine(struct intel_engine_cs *engine) | 
| Chris Wilson | 4db080f | 2013-12-04 11:37:09 +0000 | [diff] [blame] | 2733 | { | 
| Chris Wilson | dcff85c | 2016-08-05 10:14:11 +0100 | [diff] [blame] | 2734 | struct drm_i915_gem_request *request; | 
| Chris Wilson | 608c1a5 | 2015-09-03 13:01:40 +0100 | [diff] [blame] | 2735 |  | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2736 | if (engine->irq_seqno_barrier) | 
|  | 2737 | engine->irq_seqno_barrier(engine); | 
|  | 2738 |  | 
|  | 2739 | request = i915_gem_find_active_request(engine); | 
|  | 2740 | if (!request) | 
|  | 2741 | return; | 
|  | 2742 |  | 
| Mika Kuoppala | 61da536 | 2017-01-17 17:59:05 +0200 | [diff] [blame] | 2743 | if (!i915_gem_reset_request(request)) | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2744 | return; | 
|  | 2745 |  | 
|  | 2746 | DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", | 
| Chris Wilson | 65e4760 | 2016-10-28 13:58:49 +0100 | [diff] [blame] | 2747 | engine->name, request->global_seqno); | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2748 |  | 
|  | 2749 | /* Set up the CS to resume from the breadcrumb of the hung request */ | 
|  | 2750 | engine->reset_hw(engine, request); | 
|  | 2751 |  | 
| Chris Wilson | 7ec73b7 | 2017-01-05 17:00:59 +0000 | [diff] [blame] | 2752 | /* If this context is now banned, skip all of its pending requests. */ | 
| Mika Kuoppala | 61da536 | 2017-01-17 17:59:05 +0200 | [diff] [blame] | 2753 | if (i915_gem_context_is_banned(request->ctx)) | 
| Mika Kuoppala | 211b12a | 2017-01-17 17:59:03 +0200 | [diff] [blame] | 2754 | engine_skip_context(request); | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2755 | } | 
|  | 2756 |  | 
| Chris Wilson | b1ed35d | 2017-01-04 14:51:10 +0000 | [diff] [blame] | 2757 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2758 | { | 
|  | 2759 | struct intel_engine_cs *engine; | 
| Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 2760 | enum intel_engine_id id; | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2761 |  | 
| Chris Wilson | 4c7d62c | 2016-10-28 13:58:32 +0100 | [diff] [blame] | 2762 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | 
|  | 2763 |  | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2764 | i915_gem_retire_requests(dev_priv); | 
|  | 2765 |  | 
| Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 2766 | for_each_engine(engine, dev_priv, id) | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2767 | i915_gem_reset_engine(engine); | 
|  | 2768 |  | 
| Tvrtko Ursulin | 4362f4f | 2016-11-16 08:55:33 +0000 | [diff] [blame] | 2769 | i915_gem_restore_fences(dev_priv); | 
| Chris Wilson | f2a91d1 | 2016-09-21 14:51:06 +0100 | [diff] [blame] | 2770 |  | 
|  | 2771 | if (dev_priv->gt.awake) { | 
|  | 2772 | intel_sanitize_gt_powersave(dev_priv); | 
|  | 2773 | intel_enable_gt_powersave(dev_priv); | 
|  | 2774 | if (INTEL_GEN(dev_priv) >= 6) | 
|  | 2775 | gen6_rps_busy(dev_priv); | 
|  | 2776 | } | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2777 | } | 
|  | 2778 |  | 
|  | 2779 | static void nop_submit_request(struct drm_i915_gem_request *request) | 
|  | 2780 | { | 
| Chris Wilson | 3cd9442 | 2017-01-10 17:22:45 +0000 | [diff] [blame] | 2781 | dma_fence_set_error(&request->fence, -EIO); | 
| Chris Wilson | 3dcf93f | 2016-11-22 14:41:20 +0000 | [diff] [blame] | 2782 | i915_gem_request_submit(request); | 
|  | 2783 | intel_engine_init_global_seqno(request->engine, request->global_seqno); | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2784 | } | 
|  | 2785 |  | 
| Chris Wilson | 2a20d6f | 2017-01-10 17:22:46 +0000 | [diff] [blame] | 2786 | static void engine_set_wedged(struct intel_engine_cs *engine) | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2787 | { | 
| Chris Wilson | 3cd9442 | 2017-01-10 17:22:45 +0000 | [diff] [blame] | 2788 | struct drm_i915_gem_request *request; | 
|  | 2789 | unsigned long flags; | 
|  | 2790 |  | 
| Chris Wilson | 20e4933 | 2016-11-22 14:41:21 +0000 | [diff] [blame] | 2791 | /* We need to be sure that no thread is running the old callback as | 
|  | 2792 | * we install the nop handler (otherwise we would submit a request | 
|  | 2793 | * to hardware that will never complete). In order to prevent this | 
|  | 2794 | * race, we wait until the machine is idle before making the swap | 
|  | 2795 | * (using stop_machine()). | 
|  | 2796 | */ | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2797 | engine->submit_request = nop_submit_request; | 
| Chris Wilson | 70c2a24 | 2016-09-09 14:11:46 +0100 | [diff] [blame] | 2798 |  | 
| Chris Wilson | 3cd9442 | 2017-01-10 17:22:45 +0000 | [diff] [blame] | 2799 | /* Mark all executing requests as skipped */ | 
|  | 2800 | spin_lock_irqsave(&engine->timeline->lock, flags); | 
|  | 2801 | list_for_each_entry(request, &engine->timeline->requests, link) | 
|  | 2802 | dma_fence_set_error(&request->fence, -EIO); | 
|  | 2803 | spin_unlock_irqrestore(&engine->timeline->lock, flags); | 
|  | 2804 |  | 
| Chris Wilson | c4b0930 | 2016-07-20 09:21:10 +0100 | [diff] [blame] | 2805 | /* Mark all pending requests as complete so that any concurrent | 
|  | 2806 | * (lockless) lookup doesn't try to wait upon the request as we | 
|  | 2807 | * reset it. | 
|  | 2808 | */ | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 2809 | intel_engine_init_global_seqno(engine, | 
| Chris Wilson | cb399ea | 2016-11-01 10:03:16 +0000 | [diff] [blame] | 2810 | intel_engine_last_submit(engine)); | 
| Chris Wilson | c4b0930 | 2016-07-20 09:21:10 +0100 | [diff] [blame] | 2811 |  | 
| Ben Widawsky | 1d62bee | 2014-01-01 10:15:13 -0800 | [diff] [blame] | 2812 | /* | 
| Oscar Mateo | dcb4c12 | 2014-11-13 10:28:10 +0000 | [diff] [blame] | 2813 | * Clear out the execlists queue before freeing the requests, as those | 
|  | 2814 | * are the ones that keep the context and ringbuffer backing objects | 
|  | 2815 | * pinned in place. | 
|  | 2816 | */ | 
| Oscar Mateo | dcb4c12 | 2014-11-13 10:28:10 +0000 | [diff] [blame] | 2817 |  | 
| Tomas Elf | 7de1691a | 2015-10-19 16:32:32 +0100 | [diff] [blame] | 2818 | if (i915.enable_execlists) { | 
| Chris Wilson | 663f71e | 2016-11-14 20:41:00 +0000 | [diff] [blame] | 2819 | unsigned long flags; | 
|  | 2820 |  | 
|  | 2821 | spin_lock_irqsave(&engine->timeline->lock, flags); | 
|  | 2822 |  | 
| Chris Wilson | 70c2a24 | 2016-09-09 14:11:46 +0100 | [diff] [blame] | 2823 | i915_gem_request_put(engine->execlist_port[0].request); | 
|  | 2824 | i915_gem_request_put(engine->execlist_port[1].request); | 
|  | 2825 | memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); | 
| Chris Wilson | 20311bd | 2016-11-14 20:41:03 +0000 | [diff] [blame] | 2826 | engine->execlist_queue = RB_ROOT; | 
|  | 2827 | engine->execlist_first = NULL; | 
| Chris Wilson | 663f71e | 2016-11-14 20:41:00 +0000 | [diff] [blame] | 2828 |  | 
|  | 2829 | spin_unlock_irqrestore(&engine->timeline->lock, flags); | 
| Oscar Mateo | dcb4c12 | 2014-11-13 10:28:10 +0000 | [diff] [blame] | 2830 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2831 | } | 
|  | 2832 |  | 
| Chris Wilson | 20e4933 | 2016-11-22 14:41:21 +0000 | [diff] [blame] | 2833 | static int __i915_gem_set_wedged_BKL(void *data) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2834 | { | 
| Chris Wilson | 20e4933 | 2016-11-22 14:41:21 +0000 | [diff] [blame] | 2835 | struct drm_i915_private *i915 = data; | 
| Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 2836 | struct intel_engine_cs *engine; | 
| Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 2837 | enum intel_engine_id id; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2838 |  | 
| Chris Wilson | 20e4933 | 2016-11-22 14:41:21 +0000 | [diff] [blame] | 2839 | for_each_engine(engine, i915, id) | 
| Chris Wilson | 2a20d6f | 2017-01-10 17:22:46 +0000 | [diff] [blame] | 2840 | engine_set_wedged(engine); | 
| Chris Wilson | 20e4933 | 2016-11-22 14:41:21 +0000 | [diff] [blame] | 2841 |  | 
|  | 2842 | return 0; | 
|  | 2843 | } | 
|  | 2844 |  | 
|  | 2845 | void i915_gem_set_wedged(struct drm_i915_private *dev_priv) | 
|  | 2846 | { | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2847 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | 
|  | 2848 | set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); | 
| Chris Wilson | 4db080f | 2013-12-04 11:37:09 +0000 | [diff] [blame] | 2849 |  | 
| Chris Wilson | 20e4933 | 2016-11-22 14:41:21 +0000 | [diff] [blame] | 2850 | stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL); | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 2851 |  | 
| Chris Wilson | 20e4933 | 2016-11-22 14:41:21 +0000 | [diff] [blame] | 2852 | i915_gem_context_lost(dev_priv); | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2853 | i915_gem_retire_requests(dev_priv); | 
| Chris Wilson | 20e4933 | 2016-11-22 14:41:21 +0000 | [diff] [blame] | 2854 |  | 
|  | 2855 | mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2856 | } | 
|  | 2857 |  | 
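The stop_machine() trick above deserves a closer look: because the callback runs while every other CPU is held in a known-safe spin, no thread can be executing the old submit_request when it is swapped for the nop. A reduced kernel-style sketch of the pattern, with toy types standing in for the driver's:

#include <linux/stop_machine.h>

struct toy_engine {
	void (*submit)(void *request);
};

static void toy_nop_submit(void *request)
{
	/* Drop the request on the floor; the GPU is wedged. */
}

static int toy_wedge_bkl(void *data)
{
	struct toy_engine *engine = data;

	/* All other CPUs are parked: nobody is mid-call into the old
	 * callback while we replace it. */
	engine->submit = toy_nop_submit;
	return 0;
}

/* Caller: stop_machine(toy_wedge_bkl, engine, NULL); */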
| Daniel Vetter | 75ef9da | 2010-08-21 00:25:16 +0200 | [diff] [blame] | 2858 | static void | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2859 | i915_gem_retire_work_handler(struct work_struct *work) | 
|  | 2860 | { | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2861 | struct drm_i915_private *dev_priv = | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2862 | container_of(work, typeof(*dev_priv), gt.retire_work.work); | 
| Chris Wilson | 91c8a32 | 2016-07-05 10:40:23 +0100 | [diff] [blame] | 2863 | struct drm_device *dev = &dev_priv->drm; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2864 |  | 
| Chris Wilson | 891b48c | 2010-09-29 12:26:37 +0100 | [diff] [blame] | 2865 | /* Come back later if the device is busy... */ | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2866 | if (mutex_trylock(&dev->struct_mutex)) { | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2867 | i915_gem_retire_requests(dev_priv); | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2868 | mutex_unlock(&dev->struct_mutex); | 
|  | 2869 | } | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2870 |  | 
|  | 2871 | /* Keep the retire handler running until we are finally idle. | 
|  | 2872 | * We do not need to do this test under locking as in the worst-case | 
|  | 2873 | * we queue the retire worker once too often. | 
|  | 2874 | */ | 
| Chris Wilson | c961561 | 2016-07-09 10:12:06 +0100 | [diff] [blame] | 2875 | if (READ_ONCE(dev_priv->gt.awake)) { | 
|  | 2876 | i915_queue_hangcheck(dev_priv); | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2877 | queue_delayed_work(dev_priv->wq, | 
|  | 2878 | &dev_priv->gt.retire_work, | 
| Chris Wilson | bcb4508 | 2012-10-05 17:02:57 +0100 | [diff] [blame] | 2879 | round_jiffies_up_relative(HZ)); | 
| Chris Wilson | c961561 | 2016-07-09 10:12:06 +0100 | [diff] [blame] | 2880 | } | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2881 | } | 
| Chris Wilson | 891b48c | 2010-09-29 12:26:37 +0100 | [diff] [blame] | 2882 |  | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2883 | static void | 
|  | 2884 | i915_gem_idle_work_handler(struct work_struct *work) | 
|  | 2885 | { | 
|  | 2886 | struct drm_i915_private *dev_priv = | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2887 | container_of(work, typeof(*dev_priv), gt.idle_work.work); | 
| Chris Wilson | 91c8a32 | 2016-07-05 10:40:23 +0100 | [diff] [blame] | 2888 | struct drm_device *dev = &dev_priv->drm; | 
| Dave Gordon | b4ac5af | 2016-03-24 11:20:38 +0000 | [diff] [blame] | 2889 | struct intel_engine_cs *engine; | 
| Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 2890 | enum intel_engine_id id; | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2891 | bool rearm_hangcheck; | 
|  | 2892 |  | 
|  | 2893 | if (!READ_ONCE(dev_priv->gt.awake)) | 
|  | 2894 | return; | 
|  | 2895 |  | 
| Imre Deak | 0cb5670 | 2016-11-07 11:20:04 +0200 | [diff] [blame] | 2896 | /* | 
|  | 2897 | * Wait for the last execlists context to complete, but bail out in case a | 
|  | 2898 | * new request is submitted. | 
|  | 2899 | */ | 
|  | 2900 | wait_for(READ_ONCE(dev_priv->gt.active_requests) || | 
|  | 2901 | intel_execlists_idle(dev_priv), 10); | 
|  | 2902 |  | 
| Chris Wilson | 28176ef | 2016-10-28 13:58:56 +0100 | [diff] [blame] | 2903 | if (READ_ONCE(dev_priv->gt.active_requests)) | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2904 | return; | 
|  | 2905 |  | 
|  | 2906 | rearm_hangcheck = | 
|  | 2907 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); | 
|  | 2908 |  | 
|  | 2909 | if (!mutex_trylock(&dev->struct_mutex)) { | 
|  | 2910 | /* Currently busy, come back later */ | 
|  | 2911 | mod_delayed_work(dev_priv->wq, | 
|  | 2912 | &dev_priv->gt.idle_work, | 
|  | 2913 | msecs_to_jiffies(50)); | 
|  | 2914 | goto out_rearm; | 
|  | 2915 | } | 
|  | 2916 |  | 
| Imre Deak | 93c97dc | 2016-11-07 11:20:03 +0200 | [diff] [blame] | 2917 | /* | 
|  | 2918 | * A new request was retired after this work handler started; extend the | 
|  | 2919 | * active period until the next instance of the work. | 
|  | 2920 | */ | 
|  | 2921 | if (work_pending(work)) | 
|  | 2922 | goto out_unlock; | 
|  | 2923 |  | 
| Chris Wilson | 28176ef | 2016-10-28 13:58:56 +0100 | [diff] [blame] | 2924 | if (dev_priv->gt.active_requests) | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2925 | goto out_unlock; | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2926 |  | 
| Imre Deak | 0cb5670 | 2016-11-07 11:20:04 +0200 | [diff] [blame] | 2927 | if (wait_for(intel_execlists_idle(dev_priv), 10)) | 
|  | 2928 | DRM_ERROR("Timeout waiting for engines to idle\n"); | 
|  | 2929 |  | 
| Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 2930 | for_each_engine(engine, dev_priv, id) | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2931 | i915_gem_batch_pool_fini(&engine->batch_pool); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2932 |  | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2933 | GEM_BUG_ON(!dev_priv->gt.awake); | 
|  | 2934 | dev_priv->gt.awake = false; | 
|  | 2935 | rearm_hangcheck = false; | 
| Daniel Vetter | 30ecad7 | 2015-12-09 09:29:36 +0100 | [diff] [blame] | 2936 |  | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2937 | if (INTEL_GEN(dev_priv) >= 6) | 
|  | 2938 | gen6_rps_idle(dev_priv); | 
|  | 2939 | intel_runtime_pm_put(dev_priv); | 
|  | 2940 | out_unlock: | 
|  | 2941 | mutex_unlock(&dev->struct_mutex); | 
| Chris Wilson | 35c9418 | 2015-04-07 16:20:37 +0100 | [diff] [blame] | 2942 |  | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2943 | out_rearm: | 
|  | 2944 | if (rearm_hangcheck) { | 
|  | 2945 | GEM_BUG_ON(!dev_priv->gt.awake); | 
|  | 2946 | i915_queue_hangcheck(dev_priv); | 
| Chris Wilson | 35c9418 | 2015-04-07 16:20:37 +0100 | [diff] [blame] | 2947 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2948 | } | 
|  | 2949 |  | 
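Both work handlers above share the same non-blocking idiom: take the lock with mutex_trylock(), and if the device is busy, requeue the work rather than sleep inside the workqueue. A minimal kernel-style sketch of that retry pattern (toy names, not the driver's):

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct toy_dev {
	struct mutex		lock;
	struct delayed_work	retire_work;
};

static void toy_retire_handler(struct work_struct *work)
{
	struct toy_dev *dev =
		container_of(work, struct toy_dev, retire_work.work);

	/* Never block the workqueue on a contended lock: retry later. */
	if (!mutex_trylock(&dev->lock)) {
		schedule_delayed_work(&dev->retire_work,
				      msecs_to_jiffies(50));
		return;
	}

	/* ... retire completed requests ... */
	mutex_unlock(&dev->lock);
}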
| Chris Wilson | b1f788c | 2016-08-04 07:52:45 +0100 | [diff] [blame] | 2950 | void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) | 
|  | 2951 | { | 
|  | 2952 | struct drm_i915_gem_object *obj = to_intel_bo(gem); | 
|  | 2953 | struct drm_i915_file_private *fpriv = file->driver_priv; | 
|  | 2954 | struct i915_vma *vma, *vn; | 
|  | 2955 |  | 
|  | 2956 | mutex_lock(&obj->base.dev->struct_mutex); | 
|  | 2957 | list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link) | 
|  | 2958 | if (vma->vm->file == fpriv) | 
|  | 2959 | i915_vma_close(vma); | 
| Chris Wilson | f8a7fde | 2016-10-28 13:58:29 +0100 | [diff] [blame] | 2960 |  | 
|  | 2961 | if (i915_gem_object_is_active(obj) && | 
|  | 2962 | !i915_gem_object_has_active_reference(obj)) { | 
|  | 2963 | i915_gem_object_set_active_reference(obj); | 
|  | 2964 | i915_gem_object_get(obj); | 
|  | 2965 | } | 
| Chris Wilson | b1f788c | 2016-08-04 07:52:45 +0100 | [diff] [blame] | 2966 | mutex_unlock(&obj->base.dev->struct_mutex); | 
|  | 2967 | } | 
|  | 2968 |  | 
| Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 2969 | static unsigned long to_wait_timeout(s64 timeout_ns) | 
|  | 2970 | { | 
|  | 2971 | if (timeout_ns < 0) | 
|  | 2972 | return MAX_SCHEDULE_TIMEOUT; | 
|  | 2973 |  | 
|  | 2974 | if (timeout_ns == 0) | 
|  | 2975 | return 0; | 
|  | 2976 |  | 
|  | 2977 | return nsecs_to_jiffies_timeout(timeout_ns); | 
|  | 2978 | } | 
|  | 2979 |  | 
| Ben Widawsky | 5816d64 | 2012-04-11 11:18:19 -0700 | [diff] [blame] | 2980 | /** | 
| Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 2981 | * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT | 
| Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 2982 | * @dev: drm device pointer | 
|  | 2983 | * @data: ioctl data blob | 
|  | 2984 | * @file: drm file pointer | 
| Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 2985 | * | 
|  | 2986 | * Returns 0 if successful, else an error is returned with the remaining time in | 
|  | 2987 | * the timeout parameter. | 
|  | 2988 | *  -ETIME: object is still busy after timeout | 
|  | 2989 | *  -ERESTARTSYS: signal interrupted the wait | 
|  | 2990 | *  -ENOENT: object doesn't exist | 
|  | 2991 | * Also possible, but rare: | 
|  | 2992 | *  -EAGAIN: GPU wedged | 
|  | 2993 | *  -ENOMEM: damn | 
|  | 2994 | *  -ENODEV: Internal IRQ fail | 
|  | 2995 | *  -E?: The add request failed | 
|  | 2996 | * | 
|  | 2997 | * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any | 
|  | 2998 | * non-zero timeout parameter the wait ioctl will wait for the given number of | 
|  | 2999 | * nanoseconds on an object becoming unbusy. Since the wait itself does so | 
|  | 3000 | * without holding struct_mutex the object may become re-busied before this | 
|  | 3001 | * function completes. A similar but shorter race condition exists in the busy | 
|  | 3002 | * ioctl. | 
|  | 3003 | */ | 
|  | 3004 | int | 
|  | 3005 | i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | 
|  | 3006 | { | 
|  | 3007 | struct drm_i915_gem_wait *args = data; | 
|  | 3008 | struct drm_i915_gem_object *obj; | 
| Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 3009 | ktime_t start; | 
|  | 3010 | long ret; | 
| Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 3011 |  | 
| Daniel Vetter | 11b5d51 | 2014-09-29 15:31:26 +0200 | [diff] [blame] | 3012 | if (args->flags != 0) | 
|  | 3013 | return -EINVAL; | 
|  | 3014 |  | 
| Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 3015 | obj = i915_gem_object_lookup(file, args->bo_handle); | 
| Chris Wilson | 033d549 | 2016-08-05 10:14:17 +0100 | [diff] [blame] | 3016 | if (!obj) | 
| Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 3017 | return -ENOENT; | 
| Chris Wilson | 033d549 | 2016-08-05 10:14:17 +0100 | [diff] [blame] | 3018 |  | 
| Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 3019 | start = ktime_get(); | 
|  | 3020 |  | 
|  | 3021 | ret = i915_gem_object_wait(obj, | 
|  | 3022 | I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, | 
|  | 3023 | to_wait_timeout(args->timeout_ns), | 
|  | 3024 | to_rps_client(file)); | 
|  | 3025 |  | 
|  | 3026 | if (args->timeout_ns > 0) { | 
|  | 3027 | args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); | 
|  | 3028 | if (args->timeout_ns < 0) | 
|  | 3029 | args->timeout_ns = 0; | 
| Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 3030 | } | 
|  | 3031 |  | 
| Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 3032 | i915_gem_object_put(obj); | 
| John Harrison | ff86588 | 2014-11-24 18:49:28 +0000 | [diff] [blame] | 3033 | return ret; | 
| Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 3034 | } | 
|  | 3035 |  | 
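Seen from user space, the semantics documented above look roughly like this (a hedged sketch against the uapi in <drm/i915_drm.h>, with error handling trimmed):

#include <drm/i915_drm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Wait up to 1ms for a buffer to go idle. A zero timeout is a poll, a
 * negative one waits forever; on return the kernel has written the
 * remaining budget (clamped to 0) back into timeout_ns. */
static int wait_bo(int drm_fd, unsigned int handle)
{
	struct drm_i915_gem_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	wait.timeout_ns = 1000 * 1000;	/* 1ms */

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}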
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 3036 | static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 3037 | { | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 3038 | int ret, i; | 
|  | 3039 |  | 
|  | 3040 | for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { | 
|  | 3041 | ret = i915_gem_active_wait(&tl->engine[i].last_request, flags); | 
|  | 3042 | if (ret) | 
|  | 3043 | return ret; | 
|  | 3044 | } | 
|  | 3045 |  | 
|  | 3046 | return 0; | 
|  | 3047 | } | 
|  | 3048 |  | 
|  | 3049 | int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) | 
|  | 3050 | { | 
| Dave Gordon | b4ac5af | 2016-03-24 11:20:38 +0000 | [diff] [blame] | 3051 | int ret; | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 3052 |  | 
| Chris Wilson | 9caa34a | 2016-11-11 14:58:08 +0000 | [diff] [blame] | 3053 | if (flags & I915_WAIT_LOCKED) { | 
|  | 3054 | struct i915_gem_timeline *tl; | 
|  | 3055 |  | 
|  | 3056 | lockdep_assert_held(&i915->drm.struct_mutex); | 
|  | 3057 |  | 
|  | 3058 | list_for_each_entry(tl, &i915->gt.timelines, link) { | 
|  | 3059 | ret = wait_for_timeline(tl, flags); | 
|  | 3060 | if (ret) | 
|  | 3061 | return ret; | 
|  | 3062 | } | 
|  | 3063 | } else { | 
|  | 3064 | ret = wait_for_timeline(&i915->gt.global_timeline, flags); | 
| Chris Wilson | 1ec14ad | 2010-12-04 11:30:53 +0000 | [diff] [blame] | 3065 | if (ret) | 
|  | 3066 | return ret; | 
|  | 3067 | } | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 3068 |  | 
| Daniel Vetter | 8a1a49f | 2010-02-11 22:29:04 +0100 | [diff] [blame] | 3069 | return 0; | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 3070 | } | 
|  | 3071 |  | 
| Chris Wilson | d0da48c | 2016-11-06 12:59:59 +0000 | [diff] [blame] | 3072 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj, | 
|  | 3073 | bool force) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3074 | { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3075 | /* If we don't have a page list set up, then we're not pinned | 
|  | 3076 | * to the GPU, and we can ignore the cache flush because it'll happen | 
|  | 3077 | * again at bind time. | 
|  | 3078 | */ | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3079 | if (!obj->mm.pages) | 
| Chris Wilson | d0da48c | 2016-11-06 12:59:59 +0000 | [diff] [blame] | 3080 | return; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3081 |  | 
| Imre Deak | 769ce46 | 2013-02-13 21:56:05 +0200 | [diff] [blame] | 3082 | /* | 
|  | 3083 | * Stolen memory is always coherent with the GPU as it is explicitly | 
|  | 3084 | * marked as wc by the system, or the system is cache-coherent. | 
|  | 3085 | */ | 
| Chris Wilson | 6a2c423 | 2014-11-04 04:51:40 -0800 | [diff] [blame] | 3086 | if (obj->stolen || obj->phys_handle) | 
| Chris Wilson | d0da48c | 2016-11-06 12:59:59 +0000 | [diff] [blame] | 3087 | return; | 
| Imre Deak | 769ce46 | 2013-02-13 21:56:05 +0200 | [diff] [blame] | 3088 |  | 
| Chris Wilson | 9c23f7f | 2011-03-29 16:59:52 -0700 | [diff] [blame] | 3089 | /* If the GPU is snooping the contents of the CPU cache, | 
|  | 3090 | * we do not need to manually clear the CPU cache lines.  However, | 
|  | 3091 | * the caches are only snooped when the render cache is | 
|  | 3092 | * flushed/invalidated.  As we always have to emit invalidations | 
|  | 3093 | * and flushes when moving into and out of the RENDER domain, correct | 
|  | 3094 | * snooping behaviour occurs naturally as the result of our domain | 
|  | 3095 | * tracking. | 
|  | 3096 | */ | 
| Chris Wilson | 0f71979 | 2015-01-13 13:32:52 +0000 | [diff] [blame] | 3097 | if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) { | 
|  | 3098 | obj->cache_dirty = true; | 
| Chris Wilson | d0da48c | 2016-11-06 12:59:59 +0000 | [diff] [blame] | 3099 | return; | 
| Chris Wilson | 0f71979 | 2015-01-13 13:32:52 +0000 | [diff] [blame] | 3100 | } | 
| Chris Wilson | 9c23f7f | 2011-03-29 16:59:52 -0700 | [diff] [blame] | 3101 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3102 | trace_i915_gem_object_clflush(obj); | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3103 | drm_clflush_sg(obj->mm.pages); | 
| Chris Wilson | 0f71979 | 2015-01-13 13:32:52 +0000 | [diff] [blame] | 3104 | obj->cache_dirty = false; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3105 | } | 
|  | 3106 |  | 
|  | 3107 | /** Flushes the GTT write domain for the object if it's dirty. */ | 
|  | 3108 | static void | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 3109 | i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3110 | { | 
| Chris Wilson | 3b5724d | 2016-08-18 17:16:49 +0100 | [diff] [blame] | 3111 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3112 |  | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 3113 | if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3114 | return; | 
|  | 3115 |  | 
| Chris Wilson | 63256ec | 2011-01-04 18:42:07 +0000 | [diff] [blame] | 3116 | /* No actual flushing is required for the GTT write domain.  Writes | 
| Chris Wilson | 3b5724d | 2016-08-18 17:16:49 +0100 | [diff] [blame] | 3117 | * to it "immediately" go to main memory as far as we know, so there's | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3118 | * no chipset flush.  They also don't land in the render cache. | 
| Chris Wilson | 63256ec | 2011-01-04 18:42:07 +0000 | [diff] [blame] | 3119 | * | 
|  | 3120 | * However, we do have to enforce the order so that all writes through | 
|  | 3121 | * the GTT land before any writes to the device, such as updates to | 
|  | 3122 | * the GATT itself. | 
| Chris Wilson | 3b5724d | 2016-08-18 17:16:49 +0100 | [diff] [blame] | 3123 | * | 
|  | 3124 | * We also have to wait a bit for the writes to land from the GTT. | 
|  | 3125 | * An uncached read (i.e. mmio) seems to be ideal for the round-trip | 
|  | 3126 | * timing. This issue has only been observed when switching quickly | 
|  | 3127 | * between GTT writes and CPU reads from inside the kernel on recent hw, | 
|  | 3128 | * and it appears to only affect discrete GTT blocks (i.e. on LLC | 
|  | 3129 | * system agents we cannot reproduce this behaviour). | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3130 | */ | 
| Chris Wilson | 63256ec | 2011-01-04 18:42:07 +0000 | [diff] [blame] | 3131 | wmb(); | 
| Chris Wilson | 3b5724d | 2016-08-18 17:16:49 +0100 | [diff] [blame] | 3132 | if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) | 
| Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 3133 | POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); | 
| Chris Wilson | 63256ec | 2011-01-04 18:42:07 +0000 | [diff] [blame] | 3134 |  | 
| Chris Wilson | d243ad8 | 2016-08-18 17:16:44 +0100 | [diff] [blame] | 3135 | intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT)); | 
| Daniel Vetter | f99d706 | 2014-06-19 16:01:59 +0200 | [diff] [blame] | 3136 |  | 
| Chris Wilson | b0dc465 | 2016-08-18 17:16:51 +0100 | [diff] [blame] | 3137 | obj->base.write_domain = 0; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3138 | trace_i915_gem_object_change_domain(obj, | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 3139 | obj->base.read_domains, | 
| Chris Wilson | b0dc465 | 2016-08-18 17:16:51 +0100 | [diff] [blame] | 3140 | I915_GEM_DOMAIN_GTT); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3141 | } | 
|  | 3142 |  | 
|  | 3143 | /** Flushes the CPU write domain for the object if it's dirty. */ | 
|  | 3144 | static void | 
| Daniel Vetter | e62b59e | 2015-01-21 14:53:48 +0100 | [diff] [blame] | 3145 | i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3146 | { | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 3147 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3148 | return; | 
|  | 3149 |  | 
| Chris Wilson | d0da48c | 2016-11-06 12:59:59 +0000 | [diff] [blame] | 3150 | i915_gem_clflush_object(obj, obj->pin_display); | 
| Rodrigo Vivi | de152b6 | 2015-07-07 16:28:51 -0700 | [diff] [blame] | 3151 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); | 
| Daniel Vetter | f99d706 | 2014-06-19 16:01:59 +0200 | [diff] [blame] | 3152 |  | 
| Chris Wilson | b0dc465 | 2016-08-18 17:16:51 +0100 | [diff] [blame] | 3153 | obj->base.write_domain = 0; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3154 | trace_i915_gem_object_change_domain(obj, | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 3155 | obj->base.read_domains, | 
| Chris Wilson | b0dc465 | 2016-08-18 17:16:51 +0100 | [diff] [blame] | 3156 | I915_GEM_DOMAIN_CPU); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3157 | } | 
|  | 3158 |  | 
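An object carries at most one write domain at a time, so the two helpers above amount to a dispatch on that domain: a write barrier for GTT writes, a clflush for CPU writes, and clearing the domain either way. A condensed kernel-style sketch (toy types; toy_clflush stands in for drm_clflush_sg()):

#include <asm/barrier.h>	/* wmb() */

enum toy_domain { TOY_DOMAIN_NONE, TOY_DOMAIN_CPU, TOY_DOMAIN_GTT };

struct toy_obj {
	enum toy_domain write_domain;
};

static void toy_clflush(struct toy_obj *obj)
{
	/* Push dirty CPU cachelines out to memory. */
}

static void toy_flush_write_domain(struct toy_obj *obj)
{
	switch (obj->write_domain) {
	case TOY_DOMAIN_GTT:
		wmb();			/* order GTT writes before device use */
		break;
	case TOY_DOMAIN_CPU:
		toy_clflush(obj);
		break;
	default:
		break;			/* nothing dirty, nothing to flush */
	}
	obj->write_domain = TOY_DOMAIN_NONE;
}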
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 3159 | /** | 
|  | 3160 | * Moves a single object to the GTT read, and possibly write, domain. | 
| Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 3161 | * @obj: object to act on | 
|  | 3162 | * @write: ask for write access or read only | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 3163 | * | 
|  | 3164 | * This function returns when the move is complete, including waiting on | 
|  | 3165 | * flushes to occur. | 
|  | 3166 | */ | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 3167 | int | 
| Chris Wilson | 2021746 | 2010-11-23 15:26:33 +0000 | [diff] [blame] | 3168 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 3169 | { | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3170 | uint32_t old_write_domain, old_read_domains; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3171 | int ret; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 3172 |  | 
| Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 3173 | lockdep_assert_held(&obj->base.dev->struct_mutex); | 
| Chris Wilson | 4c7d62c | 2016-10-28 13:58:32 +0100 | [diff] [blame] | 3174 |  | 
| Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 3175 | ret = i915_gem_object_wait(obj, | 
|  | 3176 | I915_WAIT_INTERRUPTIBLE | | 
|  | 3177 | I915_WAIT_LOCKED | | 
|  | 3178 | (write ? I915_WAIT_ALL : 0), | 
|  | 3179 | MAX_SCHEDULE_TIMEOUT, | 
|  | 3180 | NULL); | 
| Chris Wilson | 8824178 | 2011-01-07 17:09:48 +0000 | [diff] [blame] | 3181 | if (ret) | 
|  | 3182 | return ret; | 
|  | 3183 |  | 
| Chris Wilson | c13d87e | 2016-07-20 09:21:15 +0100 | [diff] [blame] | 3184 | if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) | 
|  | 3185 | return 0; | 
|  | 3186 |  | 
| Chris Wilson | 43566de | 2015-01-02 16:29:29 +0530 | [diff] [blame] | 3187 | /* Flush and acquire obj->pages so that we are coherent through | 
|  | 3188 | * direct access in memory with previous cached writes through | 
|  | 3189 | * shmemfs and that our cache domain tracking remains valid. | 
|  | 3190 | * For example, if the obj->filp was moved to swap without us | 
|  | 3191 | * being notified and releasing the pages, we would mistakenly | 
|  | 3192 | * continue to assume that the obj remained out of the CPU cached | 
|  | 3193 | * domain. | 
|  | 3194 | */ | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3195 | ret = i915_gem_object_pin_pages(obj); | 
| Chris Wilson | 43566de | 2015-01-02 16:29:29 +0530 | [diff] [blame] | 3196 | if (ret) | 
|  | 3197 | return ret; | 
|  | 3198 |  | 
| Daniel Vetter | e62b59e | 2015-01-21 14:53:48 +0100 | [diff] [blame] | 3199 | i915_gem_object_flush_cpu_write_domain(obj); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3200 |  | 
| Chris Wilson | d0a5778 | 2012-10-09 19:24:37 +0100 | [diff] [blame] | 3201 | /* Serialise direct access to this object with the barriers for | 
|  | 3202 | * coherent writes from the GPU, by effectively invalidating the | 
|  | 3203 | * GTT domain upon first access. | 
|  | 3204 | */ | 
|  | 3205 | if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) | 
|  | 3206 | mb(); | 
|  | 3207 |  | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 3208 | old_write_domain = obj->base.write_domain; | 
|  | 3209 | old_read_domains = obj->base.read_domains; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 3210 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 3211 | /* It should now be out of any other write domains, and we can update | 
|  | 3212 | * the domain values for our changes. | 
|  | 3213 | */ | 
| Chris Wilson | 40e62d5 | 2016-10-28 13:58:41 +0100 | [diff] [blame] | 3214 | GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 3215 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3216 | if (write) { | 
| Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 3217 | obj->base.read_domains = I915_GEM_DOMAIN_GTT; | 
|  | 3218 | obj->base.write_domain = I915_GEM_DOMAIN_GTT; | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3219 | obj->mm.dirty = true; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3220 | } | 
|  | 3221 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3222 | trace_i915_gem_object_change_domain(obj, | 
|  | 3223 | old_read_domains, | 
|  | 3224 | old_write_domain); | 
|  | 3225 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3226 | i915_gem_object_unpin_pages(obj); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3227 | return 0; | 
|  | 3228 | } | 
|  | 3229 |  | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3230 | /** | 
|  | 3231 | * Changes the cache-level of an object across all VMA. | 
| Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 3232 | * @obj: object to act on | 
|  | 3233 | * @cache_level: new cache level to set for the object | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3234 | * | 
|  | 3235 | * After this function returns, the object will be in the new cache-level | 
|  | 3236 | * across all GTT and the contents of the backing storage will be coherent, | 
|  | 3237 | * with respect to the new cache-level. In order to keep the backing storage | 
|  | 3238 | * coherent for all users, we only allow a single cache level to be set | 
|  | 3239 | * globally on the object and prevent it from being changed whilst the | 
|  | 3240 | * hardware is reading from the object. That is, if the object is currently | 
|  | 3241 | * on the scanout it will be set to uncached (or equivalent display | 
|  | 3242 | * cache coherency) and all non-MOCS GPU access will also be uncached so | 
|  | 3243 | * that all direct access to the scanout remains coherent. | 
|  | 3244 | */ | 
| Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 3245 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | 
|  | 3246 | enum i915_cache_level cache_level) | 
|  | 3247 | { | 
| Chris Wilson | aa653a6 | 2016-08-04 07:52:27 +0100 | [diff] [blame] | 3248 | struct i915_vma *vma; | 
| Chris Wilson | a6a7cc4 | 2016-11-18 21:17:46 +0000 | [diff] [blame] | 3249 | int ret; | 
| Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 3250 |  | 
| Chris Wilson | 4c7d62c | 2016-10-28 13:58:32 +0100 | [diff] [blame] | 3251 | lockdep_assert_held(&obj->base.dev->struct_mutex); | 
|  | 3252 |  | 
| Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 3253 | if (obj->cache_level == cache_level) | 
| Chris Wilson | a6a7cc4 | 2016-11-18 21:17:46 +0000 | [diff] [blame] | 3254 | return 0; | 
| Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 3255 |  | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3256 | /* Inspect the list of currently bound VMA and unbind any that would | 
|  | 3257 | * be invalid given the new cache-level. This is principally to | 
|  | 3258 | * catch the issue of the CS prefetch crossing page boundaries and | 
|  | 3259 | * reading an invalid PTE on older architectures. | 
|  | 3260 | */ | 
| Chris Wilson | aa653a6 | 2016-08-04 07:52:27 +0100 | [diff] [blame] | 3261 | restart: | 
|  | 3262 | list_for_each_entry(vma, &obj->vma_list, obj_link) { | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3263 | if (!drm_mm_node_allocated(&vma->node)) | 
|  | 3264 | continue; | 
|  | 3265 |  | 
| Chris Wilson | 20dfbde | 2016-08-04 16:32:30 +0100 | [diff] [blame] | 3266 | if (i915_vma_is_pinned(vma)) { | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3267 | DRM_DEBUG("can not change the cache level of pinned objects\n"); | 
|  | 3268 | return -EBUSY; | 
|  | 3269 | } | 
|  | 3270 |  | 
| Chris Wilson | aa653a6 | 2016-08-04 07:52:27 +0100 | [diff] [blame] | 3271 | if (i915_gem_valid_gtt_space(vma, cache_level)) | 
|  | 3272 | continue; | 
|  | 3273 |  | 
|  | 3274 | ret = i915_vma_unbind(vma); | 
|  | 3275 | if (ret) | 
|  | 3276 | return ret; | 
|  | 3277 |  | 
|  | 3278 | /* As unbinding may affect other elements in the | 
|  | 3279 | * obj->vma_list (due to side-effects from retiring | 
|  | 3280 | * an active vma), play safe and restart the iterator. | 
|  | 3281 | */ | 
|  | 3282 | goto restart; | 
| Chris Wilson | 42d6ab4 | 2012-07-26 11:49:32 +0100 | [diff] [blame] | 3283 | } | 
|  | 3284 |  | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3285 | /* We can reuse the existing drm_mm nodes but need to change the | 
|  | 3286 | * cache-level on the PTE. We could simply unbind them all and | 
|  | 3287 | * rebind with the correct cache-level on next use. However since | 
|  | 3288 | * we already have a valid slot, dma mapping, pages etc, we may as well | 
|  | 3289 | * rewrite the PTE in the belief that doing so tramples upon less | 
|  | 3290 | * state and so involves less work. | 
|  | 3291 | */ | 
| Chris Wilson | 15717de | 2016-08-04 07:52:26 +0100 | [diff] [blame] | 3292 | if (obj->bind_count) { | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3293 | /* Before we change the PTE, the GPU must not be accessing it. | 
|  | 3294 | * If we wait upon the object, we know that all the bound | 
|  | 3295 | * VMA are no longer active. | 
|  | 3296 | */ | 
| Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 3297 | ret = i915_gem_object_wait(obj, | 
|  | 3298 | I915_WAIT_INTERRUPTIBLE | | 
|  | 3299 | I915_WAIT_LOCKED | | 
|  | 3300 | I915_WAIT_ALL, | 
|  | 3301 | MAX_SCHEDULE_TIMEOUT, | 
|  | 3302 | NULL); | 
| Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 3303 | if (ret) | 
|  | 3304 | return ret; | 
|  | 3305 |  | 
| Tvrtko Ursulin | 0031fb9 | 2016-11-04 14:42:44 +0000 | [diff] [blame] | 3306 | if (!HAS_LLC(to_i915(obj->base.dev)) && | 
|  | 3307 | cache_level != I915_CACHE_NONE) { | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3308 | /* Access to snoopable pages through the GTT is | 
|  | 3309 | * incoherent and on some machines causes a hard | 
|  | 3310 | * lockup. Relinquish the CPU mmapping to force | 
|  | 3311 | * userspace to refault in the pages and we can | 
|  | 3312 | * then double check if the GTT mapping is still | 
|  | 3313 | * valid for that pointer access. | 
|  | 3314 | */ | 
|  | 3315 | i915_gem_release_mmap(obj); | 
| Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 3316 |  | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3317 | /* As we no longer need a fence for GTT access, | 
|  | 3318 | * we can relinquish it now (and so prevent having | 
|  | 3319 | * to steal a fence from someone else on the next | 
|  | 3320 | * fence request). Note GPU activity would have | 
|  | 3321 | * dropped the fence as all snoopable access is | 
|  | 3322 | * supposed to be linear. | 
|  | 3323 | */ | 
| Chris Wilson | 49ef529 | 2016-08-18 17:17:00 +0100 | [diff] [blame] | 3324 | list_for_each_entry(vma, &obj->vma_list, obj_link) { | 
|  | 3325 | ret = i915_vma_put_fence(vma); | 
|  | 3326 | if (ret) | 
|  | 3327 | return ret; | 
|  | 3328 | } | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3329 | } else { | 
|  | 3330 | /* We either have incoherent backing store and | 
|  | 3331 | * so no GTT access or the architecture is fully | 
|  | 3332 | * coherent. In such cases, existing GTT mmaps | 
|  | 3333 | * ignore the cache bit in the PTE and we can | 
|  | 3334 | * rewrite it without confusing the GPU or having | 
|  | 3335 | * to force userspace to fault back in its mmaps. | 
|  | 3336 | */ | 
| Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 3337 | } | 
|  | 3338 |  | 
| Chris Wilson | 1c7f4bc | 2016-02-26 11:03:19 +0000 | [diff] [blame] | 3339 | list_for_each_entry(vma, &obj->vma_list, obj_link) { | 
| Chris Wilson | ef55f92 | 2015-10-09 14:11:27 +0100 | [diff] [blame] | 3340 | if (!drm_mm_node_allocated(&vma->node)) | 
|  | 3341 | continue; | 
|  | 3342 |  | 
|  | 3343 | ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); | 
|  | 3344 | if (ret) | 
|  | 3345 | return ret; | 
|  | 3346 | } | 
| Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 3347 | } | 
|  | 3348 |  | 
| Chris Wilson | a6a7cc4 | 2016-11-18 21:17:46 +0000 | [diff] [blame] | 3349 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU && | 
|  | 3350 | cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) | 
|  | 3351 | obj->cache_dirty = true; | 
|  | 3352 |  | 
| Chris Wilson | 1c7f4bc | 2016-02-26 11:03:19 +0000 | [diff] [blame] | 3353 | list_for_each_entry(vma, &obj->vma_list, obj_link) | 
| Chris Wilson | 2c22569 | 2013-08-09 12:26:45 +0100 | [diff] [blame] | 3354 | vma->node.color = cache_level; | 
|  | 3355 | obj->cache_level = cache_level; | 
|  | 3356 |  | 
| Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 3357 | return 0; | 
|  | 3358 | } | 
|  | 3359 |  | 
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj) {
		err = -ENOENT;
		goto out;
	}

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}
out:
	rcu_read_unlock();
	return err;
}

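/**
 * i915_gem_set_caching_ioctl - change the caching mode of a GEM object
 * @dev: drm device
 * @data: ioctl data blob, a struct drm_i915_gem_caching
 * @file: drm file
 *
 * Validates the requested mode against the hardware, waits for any
 * outstanding rendering and then rebinds the object with the new cache
 * level under struct_mutex.
 */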
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret = 0;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (obj->cache_level == level)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	ret = i915_gem_object_set_cache_level(obj, level);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_put(obj);
	return ret;
}

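/*
 * For illustration only: userspace reaches the ioctl above through
 * DRM_IOCTL_I915_GEM_SET_CACHING, e.g. via libdrm (a sketch, with a
 * hypothetical fd/handle pair):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */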
/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	u32 old_read_domains, old_write_domain;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display++;

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(to_i915(obj->base.dev)) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err_unpin_display;
	}

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
	 */
	vma = ERR_PTR(-ENOSPC);
	if (!view || view->type == I915_GGTT_VIEW_NORMAL)
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       PIN_MAPPABLE | PIN_NONBLOCK);
	if (IS_ERR(vma)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		unsigned int flags;

		/* Valleyview is definitely limited to scanning out the first
		 * 512MiB. Let's presume this behaviour was inherited from the
		 * g4x display engine and that all earlier gen are similarly
		 * limited. Testing suggests that it is a little more
		 * complicated than this. For example, Cherryview appears quite
		 * happy to scanout from anywhere within its global aperture.
		 */
		flags = 0;
		if (HAS_GMCH_DISPLAY(i915))
			flags = PIN_MAPPABLE;
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
	}
	if (IS_ERR(vma))
		goto err_unpin_display;

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
	if (obj->cache_dirty) {
		i915_gem_clflush_object(obj, true);
		intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	}

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return vma;

err_unpin_display:
	obj->pin_display--;
	return vma;
}

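/**
 * i915_gem_object_unpin_from_display_plane - undo a display-plane pin
 * @vma: the GGTT vma returned by i915_gem_object_pin_to_display_plane()
 *
 * Drops the pin_display accounting and the vma pin itself, and bumps the
 * vma to the tail of the inactive list to reduce the risk of it being
 * evicted while a flip that still references it is in flight.
 */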
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (WARN_ON(vma->obj->pin_display == 0))
		return;

	if (--vma->obj->pin_display == 0)
		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	/* Bump the LRU to try and avoid premature eviction whilst flipping */
	if (!i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	i915_vma_unpin(vma);
}

/**
 * Moves a single object to the CPU read, and possibly write, domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	long ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet.
		 * In which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	i915_gem_request_put(target);

	return ret < 0 ? ret : 0;
}

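/**
 * i915_gem_object_ggtt_pin - pin an object into the global GTT
 * @obj: object to pin
 * @view: requested GGTT view of the object
 * @size: minimum size of the binding (0 to use the object's size)
 * @alignment: required start alignment of the node (0 for any)
 * @flags: PIN_* placement constraints
 *
 * If the object is already bound but the binding does not satisfy the new
 * request, it is unbound first (subject to the PIN_NONBLOCK/PIN_MAPPABLE
 * heuristics below) before being pinned with PIN_GLOBAL.
 */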
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
			return ERR_PTR(-ENOSPC);

		if (flags & PIN_MAPPABLE) {
			/* If the required space is larger than the available
			 * aperture, we will not be able to find a slot for
			 * the object and unbinding the object now will be in
			 * vain. Worse, doing so may cause us to ping-pong
			 * the object in and out of the Global GTT and
			 * waste a lot of cycles under the mutex.
			 */
			if (vma->fence_size > dev_priv->ggtt.mappable_end)
				return ERR_PTR(-E2BIG);

			/* If NONBLOCK is set the caller is optimistically
			 * trying to cache the full object within the mappable
			 * aperture, and *must* have a fallback in place for
			 * situations where we cannot bind the object. We
			 * can be a little more lax here and use the fallback
			 * more often to avoid costly migrations of ourselves
			 * and other objects within the aperture.
			 *
			 * Half-the-aperture is used as a simple heuristic.
			 * More interesting would be to search for a free
			 * block prior to making the commitment to unbind.
			 * That caters for the self-harm case, and with a
			 * little more heuristics (e.g. NOFAULT, NOEVICT)
			 * we could try to minimise harm to others.
			 */
			if (flags & PIN_NONBLOCK &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from having
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
}

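/*
 * Worked example of the encoding above (illustrative only): for an engine
 * whose exec_id is 1, __busy_read_flag() yields 0x20000 (0x10000 << 1) and
 * __busy_write_id() yields 0x20001, i.e. the write id in the low 16 bits
 * plus the matching read flag in the upper bits.
 */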
static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence,
		     unsigned int (*flag)(unsigned int id))
{
	struct drm_i915_gem_request *rq;

	/* We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
	 *
	 * Note we only report on the status of native fences.
	 */
	if (!dma_fence_is_i915(fence))
		return 0;

	/* opencode to_request() in order to avoid const warnings */
	rq = container_of(fence, struct drm_i915_gem_request, fence);
	if (i915_gem_request_completed(rq))
		return 0;

	return flag(rq->engine->exec_id);
}

static __always_inline unsigned int
busy_check_reader(const struct dma_fence *fence)
{
	return __busy_set_if_active(fence, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct dma_fence *fence)
{
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
}

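/**
 * i915_gem_busy_ioctl - report which engines are still using an object
 * @dev: drm device
 * @data: ioctl data blob, a struct drm_i915_gem_busy
 * @file: drm file
 *
 * Runs entirely locklessly under RCU; the seqcount loop below detects
 * concurrent updates to the reservation object and retries the sampling
 * if the set of fences changed underneath us.
 */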
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	struct reservation_object_list *list;
	unsigned int seq;
	int err;

	err = -ENOENT;
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj)
		goto out;

	/* A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences for the ability
	 * to report on which i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 */
retry:
	seq = raw_read_seqcount(&obj->resv->seq);

	/* Translate the exclusive fence to the READ *and* WRITE engine */
	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));

	/* Translate shared fences to READ set of engines */
	list = rcu_dereference(obj->resv->fence);
	if (list) {
		unsigned int shared_count = list->shared_count, i;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence =
				rcu_dereference(list->shared[i]);

			args->busy |= busy_check_reader(fence);
		}
	}

	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
		goto retry;

	err = 0;
out:
	rcu_read_unlock();
	return err;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

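/**
 * i915_gem_madvise_ioctl - advise the kernel about an object's backing store
 * @dev: drm device
 * @data: ioctl data blob, a struct drm_i915_gem_madvise
 * @file_priv: drm file
 *
 * I915_MADV_DONTNEED marks the backing pages as a candidate for reaping
 * under memory pressure, I915_MADV_WILLNEED revokes that hint, and
 * args->retained reports whether the backing store still exists.
 */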
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (obj->mm.pages &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}

static void
frontbuffer_retire(struct i915_gem_active *active,
		   struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, typeof(*obj), frontbuffer_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

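/**
 * i915_gem_object_init - initialise the common parts of a GEM object
 * @obj: freshly allocated object
 * @ops: backing-storage operations for this object type
 *
 * Sets up the lists, locks and reservation object shared by all object
 * types; the caller then fills in any type-specific state.
 */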
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	mutex_init(&obj->mm.lock);

	INIT_LIST_HEAD(&obj->global_link);
	INIT_LIST_HEAD(&obj->userfault_link);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	reservation_object_init(&obj->__builtin_resv);
	obj->resv = &obj->__builtin_resv;

	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

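/**
 * i915_gem_object_create - allocate a GEM object backed by shmemfs
 * @dev_priv: i915 device
 * @size: size of the object in bytes (page aligned)
 *
 * Returns the new object on success, or an ERR_PTR() on failure.
 */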
struct drm_i915_gem_object *
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
	if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev_priv)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */
	if (obj->mm.madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);
	llist_for_each_entry(obj, freed, freed) {
		struct i915_vma *vma, *vn;

		trace_i915_gem_object_destroy(obj);

		GEM_BUG_ON(i915_gem_object_is_active(obj));
		list_for_each_entry_safe(vma, vn,
					 &obj->vma_list, obj_link) {
			GEM_BUG_ON(!i915_vma_is_ggtt(vma));
			GEM_BUG_ON(i915_vma_is_active(vma));
			vma->flags &= ~I915_VMA_PIN_MASK;
			i915_vma_close(vma);
		}
		GEM_BUG_ON(!list_empty(&obj->vma_list));
		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));

		list_del(&obj->global_link);
	}
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(obj, on, freed, freed) {
		GEM_BUG_ON(obj->bind_count);
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));

		if (obj->ops->release)
			obj->ops->release(obj);

		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		GEM_BUG_ON(obj->mm.pages);

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

		reservation_object_fini(&obj->__builtin_resv);
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(i915, obj->base.size);

		kfree(obj->bit_17);
		i915_gem_object_free(obj);
	}
}

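/* Synchronously drain the queue of objects awaiting deferred free. */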
static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed;

	freed = llist_del_all(&i915->mm.free_list);
	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);
	struct llist_node *freed;

	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be unbound now.
	 */
	while ((freed = llist_del_all(&i915->mm.free_list)))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* We can't simply use call_rcu() from i915_gem_free_object()
	 * as we need to block whilst unbinding, and the call_rcu
	 * task may be called from softirq context. So we take a
	 * detour through a worker.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		schedule_work(&i915->mm.free_work);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

	if (discard_backing_storage(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

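/* Drop the caller's reference, but defer the actual release (via the
 * active-reference mechanism) while the object is still busy on the GPU.
 */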
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
	if (i915_gem_object_is_active(obj))
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		GEM_BUG_ON(engine->last_retired_context &&
			   !i915_gem_context_is_kernel(engine->last_retired_context));
}

| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4205 | int i915_gem_suspend(struct drm_i915_private *dev_priv) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4206 | { | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4207 | struct drm_device *dev = &dev_priv->drm; | 
| Chris Wilson | dcff85c | 2016-08-05 10:14:11 +0100 | [diff] [blame] | 4208 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4209 |  | 
| Chris Wilson | 54b4f68 | 2016-07-21 21:16:19 +0100 | [diff] [blame] | 4210 | intel_suspend_gt_powersave(dev_priv); | 
|  | 4211 |  | 
| Chris Wilson | 45c5f20 | 2013-10-16 11:50:01 +0100 | [diff] [blame] | 4212 | mutex_lock(&dev->struct_mutex); | 
| Chris Wilson | 5ab57c7 | 2016-07-15 14:56:20 +0100 | [diff] [blame] | 4213 |  | 
|  | 4214 | /* We have to flush all the executing contexts to main memory so | 
|  | 4215 | * that they can be saved in the hibernation image. To ensure the last | 
|  | 4216 | * context image is coherent, we have to switch away from it. That | 
|  | 4217 | * leaves the dev_priv->kernel_context still active when | 
|  | 4218 | * we actually suspend, and its image in memory may not match the GPU | 
|  | 4219 | * state. Fortunately, the kernel_context is disposable and we do | 
|  | 4220 | * not rely on its state. | 
|  | 4221 | */ | 
|  | 4222 | ret = i915_gem_switch_to_kernel_context(dev_priv); | 
|  | 4223 | if (ret) | 
|  | 4224 | goto err; | 
|  | 4225 |  | 
| Chris Wilson | 22dd3bb | 2016-09-09 14:11:50 +0100 | [diff] [blame] | 4226 | ret = i915_gem_wait_for_idle(dev_priv, | 
|  | 4227 | I915_WAIT_INTERRUPTIBLE | | 
|  | 4228 | I915_WAIT_LOCKED); | 
| Chris Wilson | f740334 | 2013-09-13 23:57:04 +0100 | [diff] [blame] | 4229 | if (ret) | 
| Chris Wilson | 45c5f20 | 2013-10-16 11:50:01 +0100 | [diff] [blame] | 4230 | goto err; | 
| Chris Wilson | f740334 | 2013-09-13 23:57:04 +0100 | [diff] [blame] | 4231 |  | 
| Chris Wilson | c033666 | 2016-05-06 15:40:21 +0100 | [diff] [blame] | 4232 | i915_gem_retire_requests(dev_priv); | 
| Chris Wilson | 28176ef | 2016-10-28 13:58:56 +0100 | [diff] [blame] | 4233 | GEM_BUG_ON(dev_priv->gt.active_requests); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4234 |  | 
| Chris Wilson | 3033aca | 2016-10-28 13:58:47 +0100 | [diff] [blame] | 4235 | assert_kernel_context_is_current(dev_priv); | 
| Chris Wilson | b2e862d | 2016-04-28 09:56:41 +0100 | [diff] [blame] | 4236 | i915_gem_context_lost(dev_priv); | 
| Chris Wilson | 45c5f20 | 2013-10-16 11:50:01 +0100 | [diff] [blame] | 4237 | mutex_unlock(&dev->struct_mutex); | 
|  | 4238 |  | 
| Chris Wilson | 737b150 | 2015-01-26 18:03:03 +0200 | [diff] [blame] | 4239 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 4240 | cancel_delayed_work_sync(&dev_priv->gt.retire_work); | 
| Chris Wilson | bdeb978 | 2016-12-23 14:57:56 +0000 | [diff] [blame] | 4241 |  | 
|  | 4242 | /* As the idle_work rearms itself if it detects a race, play safe and | 
|  | 4243 | * repeat the flush until it is definitely idle. | 
|  | 4244 | */ | 
|  | 4245 | while (flush_delayed_work(&dev_priv->gt.idle_work)) | 
|  | 4246 | ; | 
|  | 4247 |  | 
|  | 4248 | i915_gem_drain_freed_objects(dev_priv); | 
| Chris Wilson | 29105cc | 2010-01-07 10:39:13 +0000 | [diff] [blame] | 4249 |  | 
| Chris Wilson | bdcf120 | 2014-11-25 11:56:33 +0000 | [diff] [blame] | 4250 | /* Assert that we successfully flushed all the work and | 
|  | 4251 | * reset the GPU back to its idle, low power state. | 
|  | 4252 | */ | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 4253 | WARN_ON(dev_priv->gt.awake); | 
| Imre Deak | 31ab49a | 2016-11-07 11:20:05 +0200 | [diff] [blame] | 4254 | WARN_ON(!intel_execlists_idle(dev_priv)); | 
| Chris Wilson | bdcf120 | 2014-11-25 11:56:33 +0000 | [diff] [blame] | 4255 |  | 
| Imre Deak | 1c777c5 | 2016-10-12 17:46:37 +0300 | [diff] [blame] | 4256 | /* | 
|  | 4257 | * Neither the BIOS, ourselves nor any other kernel | 
|  | 4258 | * expects the system to be in execlists mode on startup, | 
|  | 4259 | * so we need to reset the GPU back to legacy mode. And the only | 
|  | 4260 | * known way to disable logical contexts is through a GPU reset. | 
|  | 4261 | * | 
|  | 4262 | * So in order to leave the system in a known default configuration, | 
|  | 4263 | * always reset the GPU upon unload and suspend. Afterwards we | 
|  | 4264 | * clean up the GEM state tracking, flushing off the requests and | 
|  | 4265 | * leaving the system in a known idle state. | 
|  | 4266 | * | 
|  | 4267 | * Note that it is of the utmost importance that the GPU is idle and | 
|  | 4268 | * all stray writes are flushed *before* we dismantle the backing | 
|  | 4269 | * storage for the pinned objects. | 
|  | 4270 | * | 
|  | 4271 | * However, since we are uncertain that resetting the GPU on older | 
|  | 4272 | * machines is a good idea, we don't - just in case it leaves the | 
|  | 4273 | * machine in an unusable condition. | 
|  | 4274 | */ | 
| Tvrtko Ursulin | 0031fb9 | 2016-11-04 14:42:44 +0000 | [diff] [blame] | 4275 | if (HAS_HW_CONTEXTS(dev_priv)) { | 
| Imre Deak | 1c777c5 | 2016-10-12 17:46:37 +0300 | [diff] [blame] | 4276 | int reset = intel_gpu_reset(dev_priv, ALL_ENGINES); | 
|  | 4277 | WARN_ON(reset && reset != -ENODEV); | 
|  | 4278 | } | 
|  | 4279 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4280 | return 0; | 
| Chris Wilson | 45c5f20 | 2013-10-16 11:50:01 +0100 | [diff] [blame] | 4281 |  | 
|  | 4282 | err: | 
|  | 4283 | mutex_unlock(&dev->struct_mutex); | 
|  | 4284 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4285 | } | 
|  | 4286 |  | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4287 | void i915_gem_resume(struct drm_i915_private *dev_priv) | 
| Chris Wilson | 5ab57c7 | 2016-07-15 14:56:20 +0100 | [diff] [blame] | 4288 | { | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4289 | struct drm_device *dev = &dev_priv->drm; | 
| Chris Wilson | 5ab57c7 | 2016-07-15 14:56:20 +0100 | [diff] [blame] | 4290 |  | 
| Imre Deak | 31ab49a | 2016-11-07 11:20:05 +0200 | [diff] [blame] | 4291 | WARN_ON(dev_priv->gt.awake); | 
|  | 4292 |  | 
| Chris Wilson | 5ab57c7 | 2016-07-15 14:56:20 +0100 | [diff] [blame] | 4293 | mutex_lock(&dev->struct_mutex); | 
| Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 4294 | i915_gem_restore_gtt_mappings(dev_priv); | 
| Chris Wilson | 5ab57c7 | 2016-07-15 14:56:20 +0100 | [diff] [blame] | 4295 |  | 
|  | 4296 | /* As we didn't flush the kernel context before suspend, we cannot | 
|  | 4297 | * guarantee that the context image is complete. So let's just reset | 
|  | 4298 | * it and start again. | 
|  | 4299 | */ | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 4300 | dev_priv->gt.resume(dev_priv); | 
| Chris Wilson | 5ab57c7 | 2016-07-15 14:56:20 +0100 | [diff] [blame] | 4301 |  | 
|  | 4302 | mutex_unlock(&dev->struct_mutex); | 
|  | 4303 | } | 
|  | 4304 |  | 
| Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 4305 | void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) | 
| Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 4306 | { | 
| Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 4307 | if (INTEL_GEN(dev_priv) < 5 || | 
| Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 4308 | dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) | 
|  | 4309 | return; | 
|  | 4310 |  | 
|  | 4311 | I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | | 
|  | 4312 | DISP_TILE_SURFACE_SWIZZLING); | 
|  | 4313 |  | 
| Tvrtko Ursulin | 5db9401 | 2016-10-13 11:03:10 +0100 | [diff] [blame] | 4314 | if (IS_GEN5(dev_priv)) | 
| Daniel Vetter | 11782b0 | 2012-01-31 16:47:55 +0100 | [diff] [blame] | 4315 | return; | 
|  | 4316 |  | 
| Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 4317 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); | 
| Tvrtko Ursulin | 5db9401 | 2016-10-13 11:03:10 +0100 | [diff] [blame] | 4318 | if (IS_GEN6(dev_priv)) | 
| Daniel Vetter | 6b26c86 | 2012-04-24 14:04:12 +0200 | [diff] [blame] | 4319 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); | 
| Tvrtko Ursulin | 5db9401 | 2016-10-13 11:03:10 +0100 | [diff] [blame] | 4320 | else if (IS_GEN7(dev_priv)) | 
| Daniel Vetter | 6b26c86 | 2012-04-24 14:04:12 +0200 | [diff] [blame] | 4321 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); | 
| Tvrtko Ursulin | 5db9401 | 2016-10-13 11:03:10 +0100 | [diff] [blame] | 4322 | else if (IS_GEN8(dev_priv)) | 
| Ben Widawsky | 31a5336 | 2013-11-02 21:07:04 -0700 | [diff] [blame] | 4323 | I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); | 
| Ben Widawsky | 8782e26 | 2012-12-18 10:31:23 -0800 | [diff] [blame] | 4324 | else | 
|  | 4325 | BUG(); | 
| Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 4326 | } | 
| Daniel Vetter | e21af88 | 2012-02-09 20:53:27 +0100 | [diff] [blame] | 4327 |  | 
| Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 4328 | static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) | 
| Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 4329 | { | 
| Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 4330 | I915_WRITE(RING_CTL(base), 0); | 
|  | 4331 | I915_WRITE(RING_HEAD(base), 0); | 
|  | 4332 | I915_WRITE(RING_TAIL(base), 0); | 
|  | 4333 | I915_WRITE(RING_START(base), 0); | 
|  | 4334 | } | 
|  | 4335 |  | 
| Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 4336 | static void init_unused_rings(struct drm_i915_private *dev_priv) | 
| Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 4337 | { | 
| Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 4338 | if (IS_I830(dev_priv)) { | 
|  | 4339 | init_unused_ring(dev_priv, PRB1_BASE); | 
|  | 4340 | init_unused_ring(dev_priv, SRB0_BASE); | 
|  | 4341 | init_unused_ring(dev_priv, SRB1_BASE); | 
|  | 4342 | init_unused_ring(dev_priv, SRB2_BASE); | 
|  | 4343 | init_unused_ring(dev_priv, SRB3_BASE); | 
|  | 4344 | } else if (IS_GEN2(dev_priv)) { | 
|  | 4345 | init_unused_ring(dev_priv, SRB0_BASE); | 
|  | 4346 | init_unused_ring(dev_priv, SRB1_BASE); | 
|  | 4347 | } else if (IS_GEN3(dev_priv)) { | 
|  | 4348 | init_unused_ring(dev_priv, PRB1_BASE); | 
|  | 4349 | init_unused_ring(dev_priv, PRB2_BASE); | 
| Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 4350 | } | 
|  | 4351 | } | 
|  | 4352 |  | 
| Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 4353 | int | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4354 | i915_gem_init_hw(struct drm_i915_private *dev_priv) | 
| Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 4355 | { | 
| Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 4356 | struct intel_engine_cs *engine; | 
| Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 4357 | enum intel_engine_id id; | 
| Chris Wilson | d200cda | 2016-04-28 09:56:44 +0100 | [diff] [blame] | 4358 | int ret; | 
| Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 4359 |  | 
| Chris Wilson | de867c2 | 2016-10-25 13:16:02 +0100 | [diff] [blame] | 4360 | dev_priv->gt.last_init_time = ktime_get(); | 
|  | 4361 |  | 
| Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 4362 | /* Double layer security blanket, see i915_gem_init() */ | 
|  | 4363 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 
|  | 4364 |  | 
| Tvrtko Ursulin | 0031fb9 | 2016-11-04 14:42:44 +0000 | [diff] [blame] | 4365 | if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) | 
| Ben Widawsky | 05e21cc | 2013-07-04 11:02:04 -0700 | [diff] [blame] | 4366 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); | 
| Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 4367 |  | 
| Tvrtko Ursulin | 772c2a5 | 2016-10-13 11:03:01 +0100 | [diff] [blame] | 4368 | if (IS_HASWELL(dev_priv)) | 
| Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 4369 | I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? | 
| Ville Syrjälä | 0bf2134 | 2013-11-29 14:56:12 +0200 | [diff] [blame] | 4370 | LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); | 
| Rodrigo Vivi | 9435373 | 2013-08-28 16:45:46 -0300 | [diff] [blame] | 4371 |  | 
| Tvrtko Ursulin | 6e26695 | 2016-10-13 11:02:53 +0100 | [diff] [blame] | 4372 | if (HAS_PCH_NOP(dev_priv)) { | 
| Tvrtko Ursulin | fd6b8f4 | 2016-10-14 10:13:06 +0100 | [diff] [blame] | 4373 | if (IS_IVYBRIDGE(dev_priv)) { | 
| Daniel Vetter | 6ba844b | 2014-01-22 23:39:30 +0100 | [diff] [blame] | 4374 | u32 temp = I915_READ(GEN7_MSG_CTL); | 
|  | 4375 | temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); | 
|  | 4376 | I915_WRITE(GEN7_MSG_CTL, temp); | 
| Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 4377 | } else if (INTEL_GEN(dev_priv) >= 7) { | 
| Daniel Vetter | 6ba844b | 2014-01-22 23:39:30 +0100 | [diff] [blame] | 4378 | u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); | 
|  | 4379 | temp &= ~RESET_PCH_HANDSHAKE_ENABLE; | 
|  | 4380 | I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); | 
|  | 4381 | } | 
| Ben Widawsky | 88a2b2a | 2013-04-05 13:12:43 -0700 | [diff] [blame] | 4382 | } | 
|  | 4383 |  | 
| Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 4384 | i915_gem_init_swizzling(dev_priv); | 
| Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 4385 |  | 
| Daniel Vetter | d5abdfd | 2014-11-20 09:45:19 +0100 | [diff] [blame] | 4386 | /* | 
|  | 4387 | * At least 830 can leave some of the unused rings | 
|  | 4388 | * "active" (ie. head != tail) after resume which | 
|  | 4389 | * will prevent c3 entry. Makes sure all unused rings | 
|  | 4390 | * are totally idle. | 
|  | 4391 | */ | 
| Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 4392 | init_unused_rings(dev_priv); | 
| Daniel Vetter | d5abdfd | 2014-11-20 09:45:19 +0100 | [diff] [blame] | 4393 |  | 
| Dave Gordon | ed54c1a | 2016-01-19 19:02:54 +0000 | [diff] [blame] | 4394 | BUG_ON(!dev_priv->kernel_context); | 
| John Harrison | 90638cc | 2015-05-29 17:43:37 +0100 | [diff] [blame] | 4395 |  | 
| Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 4396 | ret = i915_ppgtt_init_hw(dev_priv); | 
| John Harrison | 4ad2fd8 | 2015-06-18 13:11:20 +0100 | [diff] [blame] | 4397 | if (ret) { | 
|  | 4398 | DRM_ERROR("PPGTT enable HW failed %d\n", ret); | 
|  | 4399 | goto out; | 
|  | 4400 | } | 
|  | 4401 |  | 
|  | 4402 | /* Need to do basic initialisation of all rings first: */ | 
| Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 4403 | for_each_engine(engine, dev_priv, id) { | 
| Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 4404 | ret = engine->init_hw(engine); | 
| Daniel Vetter | 35a57ff | 2014-11-20 00:33:07 +0100 | [diff] [blame] | 4405 | if (ret) | 
| Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 4406 | goto out; | 
| Daniel Vetter | 35a57ff | 2014-11-20 00:33:07 +0100 | [diff] [blame] | 4407 | } | 
| Mika Kuoppala | 9943393 | 2013-01-22 14:12:17 +0200 | [diff] [blame] | 4408 |  | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4409 | intel_mocs_init_l3cc_table(dev_priv); | 
| Peter Antoine | 0ccdacf | 2016-04-13 15:03:25 +0100 | [diff] [blame] | 4410 |  | 
| Alex Dai | 33a732f | 2015-08-12 15:43:36 +0100 | [diff] [blame] | 4411 | /* We can't enable contexts until all firmware is loaded */ | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4412 | ret = intel_guc_setup(dev_priv); | 
| Dave Gordon | e556f7c | 2016-06-07 09:14:49 +0100 | [diff] [blame] | 4413 | if (ret) | 
|  | 4414 | goto out; | 
| Alex Dai | 33a732f | 2015-08-12 15:43:36 +0100 | [diff] [blame] | 4415 |  | 
| Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 4416 | out: | 
|  | 4417 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 
| Ben Widawsky | 2fa48d8 | 2013-12-06 14:11:04 -0800 | [diff] [blame] | 4418 | return ret; | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4419 | } | 
|  | 4420 |  | 
| Chris Wilson | 39df919 | 2016-07-20 13:31:57 +0100 | [diff] [blame] | 4421 | bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) | 
|  | 4422 | { | 
|  | 4423 | if (INTEL_INFO(dev_priv)->gen < 6) | 
|  | 4424 | return false; | 
|  | 4425 |  | 
|  | 4426 | /* TODO: make semaphores and Execlists play nicely together */ | 
|  | 4427 | if (i915.enable_execlists) | 
|  | 4428 | return false; | 
|  | 4429 |  | 
|  | 4430 | if (value >= 0) | 
|  | 4431 | return value; | 
|  | 4432 |  | 
|  | 4433 | #ifdef CONFIG_INTEL_IOMMU | 
|  | 4434 | /* Enable semaphores on SNB when IO remapping is off */ | 
|  | 4435 | if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped) | 
|  | 4436 | return false; | 
|  | 4437 | #endif | 
|  | 4438 |  | 
|  | 4439 | return true; | 
|  | 4440 | } | 
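|  |  |  | 
|  |  | /* | 
|  |  |  * A hedged usage sketch (the exact call site is illustrative): the | 
|  |  |  * tri-state module parameter (-1 auto, 0 off, 1 on) is folded into a | 
|  |  |  * boolean once during driver load, | 
|  |  |  * | 
|  |  |  *	dev_priv->semaphores = | 
|  |  |  *		intel_sanitize_semaphores(dev_priv, i915.semaphores); | 
|  |  |  * | 
|  |  |  * so the rest of the driver only ever tests a sanitized value. | 
|  |  |  */ | 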
|  | 4441 |  | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4442 | int i915_gem_init(struct drm_i915_private *dev_priv) | 
| Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 4443 | { | 
| Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 4444 | int ret; | 
|  | 4445 |  | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4446 | mutex_lock(&dev_priv->drm.struct_mutex); | 
| Jesse Barnes | d62b489 | 2013-03-08 10:45:53 -0800 | [diff] [blame] | 4447 |  | 
| Oscar Mateo | a83014d | 2014-07-24 17:04:21 +0100 | [diff] [blame] | 4448 | if (!i915.enable_execlists) { | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 4449 | dev_priv->gt.resume = intel_legacy_submission_resume; | 
| Chris Wilson | 7e37f88 | 2016-08-02 22:50:21 +0100 | [diff] [blame] | 4450 | dev_priv->gt.cleanup_engine = intel_engine_cleanup; | 
| Oscar Mateo | 454afeb | 2014-07-24 17:04:22 +0100 | [diff] [blame] | 4451 | } else { | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 4452 | dev_priv->gt.resume = intel_lr_context_resume; | 
| Tvrtko Ursulin | 117897f | 2016-03-16 11:00:40 +0000 | [diff] [blame] | 4453 | dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; | 
| Oscar Mateo | a83014d | 2014-07-24 17:04:21 +0100 | [diff] [blame] | 4454 | } | 
|  | 4455 |  | 
| Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 4456 | /* This is just a security blanket to placate dragons. | 
|  | 4457 | * On some systems, we very sporadically observe that the first TLBs | 
|  | 4458 | * used by the CS may be stale, despite us poking the TLB reset. If | 
|  | 4459 | * we hold the forcewake during initialisation these problems | 
|  | 4460 | * just magically go away. | 
|  | 4461 | */ | 
|  | 4462 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 
|  | 4463 |  | 
| Chris Wilson | 72778cb | 2016-05-19 16:17:16 +0100 | [diff] [blame] | 4464 | i915_gem_init_userptr(dev_priv); | 
| Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 4465 |  | 
|  | 4466 | ret = i915_gem_init_ggtt(dev_priv); | 
|  | 4467 | if (ret) | 
|  | 4468 | goto out_unlock; | 
| Jesse Barnes | d62b489 | 2013-03-08 10:45:53 -0800 | [diff] [blame] | 4469 |  | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4470 | ret = i915_gem_context_init(dev_priv); | 
| Jani Nikula | 7bcc377 | 2014-12-05 14:17:42 +0200 | [diff] [blame] | 4471 | if (ret) | 
|  | 4472 | goto out_unlock; | 
| Ben Widawsky | 2fa48d8 | 2013-12-06 14:11:04 -0800 | [diff] [blame] | 4473 |  | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4474 | ret = intel_engines_init(dev_priv); | 
| Daniel Vetter | 35a57ff | 2014-11-20 00:33:07 +0100 | [diff] [blame] | 4475 | if (ret) | 
| Jani Nikula | 7bcc377 | 2014-12-05 14:17:42 +0200 | [diff] [blame] | 4476 | goto out_unlock; | 
| Daniel Vetter | 53ca26c | 2012-04-26 23:28:03 +0200 | [diff] [blame] | 4477 |  | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4478 | ret = i915_gem_init_hw(dev_priv); | 
| Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 4479 | if (ret == -EIO) { | 
| Chris Wilson | 7e21d64 | 2016-07-27 09:07:29 +0100 | [diff] [blame] | 4480 | /* Allow engine initialisation to fail by marking the GPU as | 
| Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 4481 | * wedged. But we only want to do this where the GPU is angry, | 
|  | 4482 | * for any other failure, such as an allocation failure, we bail. | 
|  | 4483 | */ | 
|  | 4484 | DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); | 
| Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 4485 | i915_gem_set_wedged(dev_priv); | 
| Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 4486 | ret = 0; | 
| Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 4487 | } | 
| Jani Nikula | 7bcc377 | 2014-12-05 14:17:42 +0200 | [diff] [blame] | 4488 |  | 
|  | 4489 | out_unlock: | 
| Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 4490 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 
| Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 4491 | mutex_unlock(&dev_priv->drm.struct_mutex); | 
| Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 4492 |  | 
| Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 4493 | return ret; | 
| Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 4494 | } | 
|  | 4495 |  | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4496 | void | 
| Tvrtko Ursulin | cb15d9f | 2016-12-01 14:16:39 +0000 | [diff] [blame] | 4497 | i915_gem_cleanup_engines(struct drm_i915_private *dev_priv) | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4498 | { | 
| Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 4499 | struct intel_engine_cs *engine; | 
| Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 4500 | enum intel_engine_id id; | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4501 |  | 
| Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 4502 | for_each_engine(engine, dev_priv, id) | 
| Tvrtko Ursulin | 117897f | 2016-03-16 11:00:40 +0000 | [diff] [blame] | 4503 | dev_priv->gt.cleanup_engine(engine); | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4504 | } | 
|  | 4505 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4506 | void | 
| Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 4507 | i915_gem_load_init_fences(struct drm_i915_private *dev_priv) | 
|  | 4508 | { | 
| Chris Wilson | 49ef529 | 2016-08-18 17:17:00 +0100 | [diff] [blame] | 4509 | int i; | 
| Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 4510 |  | 
|  | 4511 | if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && | 
|  | 4512 | !IS_CHERRYVIEW(dev_priv)) | 
|  | 4513 | dev_priv->num_fence_regs = 32; | 
| Jani Nikula | 73f67aa | 2016-12-07 22:48:09 +0200 | [diff] [blame] | 4514 | else if (INTEL_INFO(dev_priv)->gen >= 4 || | 
|  | 4515 | IS_I945G(dev_priv) || IS_I945GM(dev_priv) || | 
|  | 4516 | IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) | 
| Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 4517 | dev_priv->num_fence_regs = 16; | 
|  | 4518 | else | 
|  | 4519 | dev_priv->num_fence_regs = 8; | 
|  | 4520 |  | 
| Chris Wilson | c033666 | 2016-05-06 15:40:21 +0100 | [diff] [blame] | 4521 | if (intel_vgpu_active(dev_priv)) | 
| Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 4522 | dev_priv->num_fence_regs = | 
|  | 4523 | I915_READ(vgtif_reg(avail_rs.fence_num)); | 
|  | 4524 |  | 
|  | 4525 | /* Initialize fence registers to zero */ | 
| Chris Wilson | 49ef529 | 2016-08-18 17:17:00 +0100 | [diff] [blame] | 4526 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 
|  | 4527 | struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; | 
|  | 4528 |  | 
|  | 4529 | fence->i915 = dev_priv; | 
|  | 4530 | fence->id = i; | 
|  | 4531 | list_add_tail(&fence->link, &dev_priv->mm.fence_list); | 
|  | 4532 | } | 
| Tvrtko Ursulin | 4362f4f | 2016-11-16 08:55:33 +0000 | [diff] [blame] | 4533 | i915_gem_restore_fences(dev_priv); | 
| Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 4534 |  | 
| Tvrtko Ursulin | 4362f4f | 2016-11-16 08:55:33 +0000 | [diff] [blame] | 4535 | i915_gem_detect_bit_6_swizzle(dev_priv); | 
| Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 4536 | } | 
|  | 4537 |  | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4538 | int | 
| Tvrtko Ursulin | cb15d9f | 2016-12-01 14:16:39 +0000 | [diff] [blame] | 4539 | i915_gem_load_init(struct drm_i915_private *dev_priv) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4540 | { | 
| Tvrtko Ursulin | a933568 | 2016-11-02 15:14:59 +0000 | [diff] [blame] | 4541 | int err = -ENOMEM; | 
| Chris Wilson | 42dcedd | 2012-11-15 11:32:30 +0000 | [diff] [blame] | 4542 |  | 
| Tvrtko Ursulin | a933568 | 2016-11-02 15:14:59 +0000 | [diff] [blame] | 4543 | dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); | 
|  | 4544 | if (!dev_priv->objects) | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4545 | goto err_out; | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4546 |  | 
| Tvrtko Ursulin | a933568 | 2016-11-02 15:14:59 +0000 | [diff] [blame] | 4547 | dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); | 
|  | 4548 | if (!dev_priv->vmas) | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4549 | goto err_objects; | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4550 |  | 
| Tvrtko Ursulin | a933568 | 2016-11-02 15:14:59 +0000 | [diff] [blame] | 4551 | dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, | 
|  | 4552 | SLAB_HWCACHE_ALIGN | | 
|  | 4553 | SLAB_RECLAIM_ACCOUNT | | 
|  | 4554 | SLAB_DESTROY_BY_RCU); | 
|  | 4555 | if (!dev_priv->requests) | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4556 | goto err_vmas; | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4557 |  | 
| Chris Wilson | 52e5420 | 2016-11-14 20:41:02 +0000 | [diff] [blame] | 4558 | dev_priv->dependencies = KMEM_CACHE(i915_dependency, | 
|  | 4559 | SLAB_HWCACHE_ALIGN | | 
|  | 4560 | SLAB_RECLAIM_ACCOUNT); | 
|  | 4561 | if (!dev_priv->dependencies) | 
|  | 4562 | goto err_requests; | 
|  | 4563 |  | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4564 | mutex_lock(&dev_priv->drm.struct_mutex); | 
|  | 4565 | INIT_LIST_HEAD(&dev_priv->gt.timelines); | 
| Chris Wilson | bb89485 | 2016-11-14 20:40:57 +0000 | [diff] [blame] | 4566 | err = i915_gem_timeline_init__global(dev_priv); | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4567 | mutex_unlock(&dev_priv->drm.struct_mutex); | 
|  | 4568 | if (err) | 
| Chris Wilson | 52e5420 | 2016-11-14 20:41:02 +0000 | [diff] [blame] | 4569 | goto err_dependencies; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4570 |  | 
| Ben Widawsky | a33afea | 2013-09-17 21:12:45 -0700 | [diff] [blame] | 4571 | INIT_LIST_HEAD(&dev_priv->context_list); | 
| Chris Wilson | fbbd37b | 2016-10-28 13:58:42 +0100 | [diff] [blame] | 4572 | INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work); | 
|  | 4573 | init_llist_head(&dev_priv->mm.free_list); | 
| Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 4574 | INIT_LIST_HEAD(&dev_priv->mm.unbound_list); | 
|  | 4575 | INIT_LIST_HEAD(&dev_priv->mm.bound_list); | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 4576 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 
| Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 4577 | INIT_LIST_HEAD(&dev_priv->mm.userfault_list); | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 4578 | INIT_DELAYED_WORK(&dev_priv->gt.retire_work, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4579 | i915_gem_retire_work_handler); | 
| Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 4580 | INIT_DELAYED_WORK(&dev_priv->gt.idle_work, | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 4581 | i915_gem_idle_work_handler); | 
| Chris Wilson | 1f15b76 | 2016-07-01 17:23:14 +0100 | [diff] [blame] | 4582 | init_waitqueue_head(&dev_priv->gpu_error.wait_queue); | 
| Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 4583 | init_waitqueue_head(&dev_priv->gpu_error.reset_queue); | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 4584 |  | 
| Chris Wilson | 72bfa19 | 2010-12-19 11:42:05 +0000 | [diff] [blame] | 4585 | dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; | 
|  | 4586 |  | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 4587 | init_waitqueue_head(&dev_priv->pending_flip_queue); | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 4588 |  | 
| Chris Wilson | ce453d8 | 2011-02-21 14:43:56 +0000 | [diff] [blame] | 4589 | dev_priv->mm.interruptible = true; | 
|  | 4590 |  | 
| Joonas Lahtinen | 6f63340 | 2016-09-01 14:58:21 +0300 | [diff] [blame] | 4591 | atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); | 
|  | 4592 |  | 
| Chris Wilson | b5add95 | 2016-08-04 16:32:36 +0100 | [diff] [blame] | 4593 | spin_lock_init(&dev_priv->fb_tracking.lock); | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4594 |  | 
|  | 4595 | return 0; | 
|  | 4596 |  | 
| Chris Wilson | 52e5420 | 2016-11-14 20:41:02 +0000 | [diff] [blame] | 4597 | err_dependencies: | 
|  | 4598 | kmem_cache_destroy(dev_priv->dependencies); | 
| Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 4599 | err_requests: | 
|  | 4600 | kmem_cache_destroy(dev_priv->requests); | 
|  | 4601 | err_vmas: | 
|  | 4602 | kmem_cache_destroy(dev_priv->vmas); | 
|  | 4603 | err_objects: | 
|  | 4604 | kmem_cache_destroy(dev_priv->objects); | 
|  | 4605 | err_out: | 
|  | 4606 | return err; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4607 | } | 
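|  |  |  | 
|  |  | /* | 
|  |  |  * Each cache created above is destroyed again in i915_gem_load_cleanup() | 
|  |  |  * below, where a final rcu_barrier() ensures that the | 
|  |  |  * SLAB_DESTROY_BY_RCU requests slab is truly freed before unload | 
|  |  |  * continues. | 
|  |  |  */ | 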
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4608 |  | 
| Tvrtko Ursulin | cb15d9f | 2016-12-01 14:16:39 +0000 | [diff] [blame] | 4609 | void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) | 
| Imre Deak | d64aa09 | 2016-01-19 15:26:29 +0200 | [diff] [blame] | 4610 | { | 
| Chris Wilson | 7d5d59e | 2016-11-01 08:48:41 +0000 | [diff] [blame] | 4611 | WARN_ON(!llist_empty(&dev_priv->mm.free_list)); | 
|  | 4612 |  | 
| Matthew Auld | ea84aa7 | 2016-11-17 21:04:11 +0000 | [diff] [blame] | 4613 | mutex_lock(&dev_priv->drm.struct_mutex); | 
|  | 4614 | i915_gem_timeline_fini(&dev_priv->gt.global_timeline); | 
|  | 4615 | WARN_ON(!list_empty(&dev_priv->gt.timelines)); | 
|  | 4616 | mutex_unlock(&dev_priv->drm.struct_mutex); | 
|  | 4617 |  | 
| Chris Wilson | 52e5420 | 2016-11-14 20:41:02 +0000 | [diff] [blame] | 4618 | kmem_cache_destroy(dev_priv->dependencies); | 
| Imre Deak | d64aa09 | 2016-01-19 15:26:29 +0200 | [diff] [blame] | 4619 | kmem_cache_destroy(dev_priv->requests); | 
|  | 4620 | kmem_cache_destroy(dev_priv->vmas); | 
|  | 4621 | kmem_cache_destroy(dev_priv->objects); | 
| Chris Wilson | 0eafec6 | 2016-08-04 16:32:41 +0100 | [diff] [blame] | 4622 |  | 
|  | 4623 | /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ | 
|  | 4624 | rcu_barrier(); | 
| Imre Deak | d64aa09 | 2016-01-19 15:26:29 +0200 | [diff] [blame] | 4625 | } | 
|  | 4626 |  | 
| Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 4627 | int i915_gem_freeze(struct drm_i915_private *dev_priv) | 
|  | 4628 | { | 
|  | 4629 | intel_runtime_pm_get(dev_priv); | 
|  | 4630 |  | 
|  | 4631 | mutex_lock(&dev_priv->drm.struct_mutex); | 
|  | 4632 | i915_gem_shrink_all(dev_priv); | 
|  | 4633 | mutex_unlock(&dev_priv->drm.struct_mutex); | 
|  | 4634 |  | 
|  | 4635 | intel_runtime_pm_put(dev_priv); | 
|  | 4636 |  | 
|  | 4637 | return 0; | 
|  | 4638 | } | 
|  | 4639 |  | 
| Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 4640 | int i915_gem_freeze_late(struct drm_i915_private *dev_priv) | 
|  | 4641 | { | 
|  | 4642 | struct drm_i915_gem_object *obj; | 
| Chris Wilson | 7aab2d5 | 2016-09-09 20:02:18 +0100 | [diff] [blame] | 4643 | struct list_head *phases[] = { | 
|  | 4644 | &dev_priv->mm.unbound_list, | 
|  | 4645 | &dev_priv->mm.bound_list, | 
|  | 4646 | NULL | 
|  | 4647 | }, **p; | 
| Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 4648 |  | 
|  | 4649 | /* Called just before we write the hibernation image. | 
|  | 4650 | * | 
|  | 4651 | * We need to update the domain tracking to reflect that the CPU | 
|  | 4652 | * will be accessing all the pages to create and restore from the | 
|  | 4653 | * hibernation, and so upon restoration those pages will be in the | 
|  | 4654 | * CPU domain. | 
|  | 4655 | * | 
|  | 4656 | * To make sure the hibernation image contains the latest state, | 
|  | 4657 | * we update that state just before writing out the image. | 
| Chris Wilson | 7aab2d5 | 2016-09-09 20:02:18 +0100 | [diff] [blame] | 4658 | * | 
|  | 4659 | * To try and reduce the hibernation image, we manually shrink | 
|  | 4660 | * the objects as well. | 
| Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 4661 | */ | 
|  | 4662 |  | 
| Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 4663 | mutex_lock(&dev_priv->drm.struct_mutex); | 
|  | 4664 | i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND); | 
| Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 4665 |  | 
| Chris Wilson | 7aab2d5 | 2016-09-09 20:02:18 +0100 | [diff] [blame] | 4666 | for (p = phases; *p; p++) { | 
| Joonas Lahtinen | 56cea32 | 2016-11-02 12:16:04 +0200 | [diff] [blame] | 4667 | list_for_each_entry(obj, *p, global_link) { | 
| Chris Wilson | 7aab2d5 | 2016-09-09 20:02:18 +0100 | [diff] [blame] | 4668 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; | 
|  | 4669 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 
|  | 4670 | } | 
| Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 4671 | } | 
| Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 4672 | mutex_unlock(&dev_priv->drm.struct_mutex); | 
| Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 4673 |  | 
|  | 4674 | return 0; | 
|  | 4675 | } | 
|  | 4676 |  | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 4677 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 4678 | { | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 4679 | struct drm_i915_file_private *file_priv = file->driver_priv; | 
| Chris Wilson | 15f7bbc | 2016-07-26 12:01:52 +0100 | [diff] [blame] | 4680 | struct drm_i915_gem_request *request; | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 4681 |  | 
|  | 4682 | /* Clean up our request list when the client is going away, so that | 
|  | 4683 | * later retire_requests won't dereference our soon-to-be-gone | 
|  | 4684 | * file_priv. | 
|  | 4685 | */ | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 4686 | spin_lock(&file_priv->mm.lock); | 
| Chris Wilson | 15f7bbc | 2016-07-26 12:01:52 +0100 | [diff] [blame] | 4687 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 4688 | request->file_priv = NULL; | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 4689 | spin_unlock(&file_priv->mm.lock); | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 4690 |  | 
| Chris Wilson | 2e1b873 | 2015-04-27 13:41:22 +0100 | [diff] [blame] | 4691 | if (!list_empty(&file_priv->rps.link)) { | 
| Chris Wilson | 8d3afd7 | 2015-05-21 21:01:47 +0100 | [diff] [blame] | 4692 | spin_lock(&to_i915(dev)->rps.client_lock); | 
| Chris Wilson | 2e1b873 | 2015-04-27 13:41:22 +0100 | [diff] [blame] | 4693 | list_del(&file_priv->rps.link); | 
| Chris Wilson | 8d3afd7 | 2015-05-21 21:01:47 +0100 | [diff] [blame] | 4694 | spin_unlock(&to_i915(dev)->rps.client_lock); | 
| Chris Wilson | 1854d5c | 2015-04-07 16:20:32 +0100 | [diff] [blame] | 4695 | } | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 4696 | } | 
|  | 4697 |  | 
|  | 4698 | int i915_gem_open(struct drm_device *dev, struct drm_file *file) | 
|  | 4699 | { | 
|  | 4700 | struct drm_i915_file_private *file_priv; | 
| Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 4701 | int ret; | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 4702 |  | 
| Chris Wilson | c4c29d7 | 2016-11-09 10:45:07 +0000 | [diff] [blame] | 4703 | DRM_DEBUG("\n"); | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 4704 |  | 
|  | 4705 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); | 
|  | 4706 | if (!file_priv) | 
|  | 4707 | return -ENOMEM; | 
|  | 4708 |  | 
|  | 4709 | file->driver_priv = file_priv; | 
| Dave Gordon | f19ec8c | 2016-07-04 11:34:37 +0100 | [diff] [blame] | 4710 | file_priv->dev_priv = to_i915(dev); | 
| Chris Wilson | ab0e7ff | 2014-02-25 17:11:24 +0200 | [diff] [blame] | 4711 | file_priv->file = file; | 
| Chris Wilson | 2e1b873 | 2015-04-27 13:41:22 +0100 | [diff] [blame] | 4712 | INIT_LIST_HEAD(&file_priv->rps.link); | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 4713 |  | 
|  | 4714 | spin_lock_init(&file_priv->mm.lock); | 
|  | 4715 | INIT_LIST_HEAD(&file_priv->mm.request_list); | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 4716 |  | 
| Chris Wilson | c80ff16 | 2016-07-27 09:07:27 +0100 | [diff] [blame] | 4717 | file_priv->bsd_engine = -1; | 
| Tvrtko Ursulin | de1add3 | 2016-01-15 15:12:50 +0000 | [diff] [blame] | 4718 |  | 
| Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 4719 | ret = i915_gem_context_open(dev, file); | 
|  | 4720 | if (ret) | 
|  | 4721 | kfree(file_priv); | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 4722 |  | 
| Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 4723 | return ret; | 
| Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 4724 | } | 
|  | 4725 |  | 
| Daniel Vetter | b680c37 | 2014-09-19 18:27:27 +0200 | [diff] [blame] | 4726 | /** | 
|  | 4727 | * i915_gem_track_fb - update frontbuffer tracking | 
| Geliang Tang | d9072a3 | 2015-09-15 05:58:44 -0700 | [diff] [blame] | 4728 | * @old: current GEM buffer for the frontbuffer slots | 
|  | 4729 | * @new: new GEM buffer for the frontbuffer slots | 
|  | 4730 | * @frontbuffer_bits: bitmask of frontbuffer slots | 
| Daniel Vetter | b680c37 | 2014-09-19 18:27:27 +0200 | [diff] [blame] | 4731 | * | 
|  | 4732 | * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them | 
|  | 4733 | * from @old and setting them in @new. Both @old and @new can be NULL. | 
|  | 4734 | */ | 
| Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 4735 | void i915_gem_track_fb(struct drm_i915_gem_object *old, | 
|  | 4736 | struct drm_i915_gem_object *new, | 
|  | 4737 | unsigned frontbuffer_bits) | 
|  | 4738 | { | 
| Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 4739 | /* Control of individual bits within the mask is guarded by | 
|  | 4740 | * the owning plane->mutex, i.e. we can never see concurrent | 
|  | 4741 | * manipulation of individual bits. But since the bitfield as a whole | 
|  | 4742 | * is updated using RMW, we need to use atomics in order to update | 
|  | 4743 | * the bits. | 
|  | 4744 | */ | 
|  | 4745 | BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > | 
|  | 4746 | sizeof(atomic_t) * BITS_PER_BYTE); | 
|  | 4747 |  | 
| Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 4748 | if (old) { | 
| Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 4749 | WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); | 
|  | 4750 | atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); | 
| Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 4751 | } | 
|  | 4752 |  | 
|  | 4753 | if (new) { | 
| Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 4754 | WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); | 
|  | 4755 | atomic_or(frontbuffer_bits, &new->frontbuffer_bits); | 
| Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 4756 | } | 
|  | 4757 | } | 
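|  |  |  | 
|  |  | /* | 
|  |  |  * A hedged usage sketch (the call site shown is illustrative, not a | 
|  |  |  * verbatim copy of the plane-update code): when a plane flips from one | 
|  |  |  * backing object to another, the tracking bits migrate atomically, | 
|  |  |  * | 
|  |  |  *	i915_gem_track_fb(old_fb_obj, new_fb_obj, | 
|  |  |  *			  INTEL_FRONTBUFFER_PRIMARY(pipe)); | 
|  |  |  * | 
|  |  |  * with either object allowed to be NULL on enable/disable. | 
|  |  |  */ | 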
|  | 4758 |  | 
| Dave Gordon | ea70299 | 2015-07-09 19:29:02 +0100 | [diff] [blame] | 4759 | /* Allocate a new GEM object and fill it with the supplied data */ | 
|  | 4760 | struct drm_i915_gem_object * | 
| Tvrtko Ursulin | 12d79d7 | 2016-12-01 14:16:37 +0000 | [diff] [blame] | 4761 | i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, | 
| Dave Gordon | ea70299 | 2015-07-09 19:29:02 +0100 | [diff] [blame] | 4762 | const void *data, size_t size) | 
|  | 4763 | { | 
|  | 4764 | struct drm_i915_gem_object *obj; | 
|  | 4765 | struct sg_table *sg; | 
|  | 4766 | size_t bytes; | 
|  | 4767 | int ret; | 
|  | 4768 |  | 
| Tvrtko Ursulin | 12d79d7 | 2016-12-01 14:16:37 +0000 | [diff] [blame] | 4769 | obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); | 
| Chris Wilson | fe3db79 | 2016-04-25 13:32:13 +0100 | [diff] [blame] | 4770 | if (IS_ERR(obj)) | 
| Dave Gordon | ea70299 | 2015-07-09 19:29:02 +0100 | [diff] [blame] | 4771 | return obj; | 
|  | 4772 |  | 
|  | 4773 | ret = i915_gem_object_set_to_cpu_domain(obj, true); | 
|  | 4774 | if (ret) | 
|  | 4775 | goto fail; | 
|  | 4776 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 4777 | ret = i915_gem_object_pin_pages(obj); | 
| Dave Gordon | ea70299 | 2015-07-09 19:29:02 +0100 | [diff] [blame] | 4778 | if (ret) | 
|  | 4779 | goto fail; | 
|  | 4780 |  | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 4781 | sg = obj->mm.pages; | 
| Dave Gordon | ea70299 | 2015-07-09 19:29:02 +0100 | [diff] [blame] | 4782 | bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size); | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 4783 | obj->mm.dirty = true; /* Backing store is now out of date */ | 
| Dave Gordon | ea70299 | 2015-07-09 19:29:02 +0100 | [diff] [blame] | 4784 | i915_gem_object_unpin_pages(obj); | 
|  | 4785 |  | 
|  | 4786 | if (WARN_ON(bytes != size)) { | 
|  | 4787 | DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size); | 
|  | 4788 | ret = -EFAULT; | 
|  | 4789 | goto fail; | 
|  | 4790 | } | 
|  | 4791 |  | 
|  | 4792 | return obj; | 
|  | 4793 |  | 
|  | 4794 | fail: | 
| Chris Wilson | f8c417c | 2016-07-20 13:31:53 +0100 | [diff] [blame] | 4795 | i915_gem_object_put(obj); | 
| Dave Gordon | ea70299 | 2015-07-09 19:29:02 +0100 | [diff] [blame] | 4796 | return ERR_PTR(ret); | 
|  | 4797 | } | 
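|  |  |  | 
|  |  | /* | 
|  |  |  * A hedged usage sketch (the firmware blob shown is illustrative): | 
|  |  |  * this helper suits one-shot uploads, e.g. wrapping a firmware image | 
|  |  |  * in a GEM object, | 
|  |  |  * | 
|  |  |  *	obj = i915_gem_object_create_from_data(dev_priv, | 
|  |  |  *					       fw->data, fw->size); | 
|  |  |  *	if (IS_ERR(obj)) | 
|  |  |  *		return PTR_ERR(obj); | 
|  |  |  * | 
|  |  |  * after which the object owns a coherent copy of the data. | 
|  |  |  */ | 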
| Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 4798 |  | 
|  | 4799 | struct scatterlist * | 
|  | 4800 | i915_gem_object_get_sg(struct drm_i915_gem_object *obj, | 
|  | 4801 | unsigned int n, | 
|  | 4802 | unsigned int *offset) | 
|  | 4803 | { | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 4804 | struct i915_gem_object_page_iter *iter = &obj->mm.get_page; | 
| Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 4805 | struct scatterlist *sg; | 
|  | 4806 | unsigned int idx, count; | 
|  | 4807 |  | 
|  | 4808 | might_sleep(); | 
|  | 4809 | GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 4810 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); | 
| Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 4811 |  | 
|  | 4812 | /* As we iterate forward through the sg, we record each entry in a | 
|  | 4813 | * radixtree for quick repeated (backwards) lookups. If we have seen | 
|  | 4814 | * this index previously, we will have an entry for it. | 
|  | 4815 | * | 
|  | 4816 | * Initial lookup is O(N), but this is amortized to O(1) for | 
|  | 4817 | * sequential page access (where each new request is consecutive | 
|  | 4818 | * to the previous one). Repeated lookups are O(lg(obj->base.size)), | 
|  | 4819 | * i.e. O(1) with a large constant! | 
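|  |  | * | 
|  |  | * (For example, a linear walk of pages 0, 1, 2, ... simply resumes | 
|  |  | * from the cached iter->sg_pos each time, while a later backwards | 
|  |  | * lookup of an already-seen page is serviced directly from the | 
|  |  | * radixtree.) | 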
|  | 4820 | */ | 
|  | 4821 | if (n < READ_ONCE(iter->sg_idx)) | 
|  | 4822 | goto lookup; | 
|  | 4823 |  | 
|  | 4824 | mutex_lock(&iter->lock); | 
|  | 4825 |  | 
|  | 4826 | /* We prefer to reuse the last sg so that repeated lookups of this | 
|  | 4827 | * (or the subsequent) sg are fast - comparing against the last | 
|  | 4828 | * sg is faster than going through the radixtree. | 
|  | 4829 | */ | 
|  | 4830 |  | 
|  | 4831 | sg = iter->sg_pos; | 
|  | 4832 | idx = iter->sg_idx; | 
|  | 4833 | count = __sg_page_count(sg); | 
|  | 4834 |  | 
|  | 4835 | while (idx + count <= n) { | 
|  | 4836 | unsigned long exception, i; | 
|  | 4837 | int ret; | 
|  | 4838 |  | 
|  | 4839 | /* If we cannot allocate and insert this entry, or the | 
|  | 4840 | * individual pages from this range, cancel updating the | 
|  | 4841 | * sg_idx so that on this lookup we are forced to linearly | 
|  | 4842 | * scan onwards, but on future lookups we will try the | 
|  | 4843 | * insertion again (in which case we need to be careful of | 
|  | 4844 | * the error return reporting that we have already inserted | 
|  | 4845 | * this index). | 
|  | 4846 | */ | 
|  | 4847 | ret = radix_tree_insert(&iter->radix, idx, sg); | 
|  | 4848 | if (ret && ret != -EEXIST) | 
|  | 4849 | goto scan; | 
|  | 4850 |  | 
|  | 4851 | exception = | 
|  | 4852 | RADIX_TREE_EXCEPTIONAL_ENTRY | | 
|  | 4853 | idx << RADIX_TREE_EXCEPTIONAL_SHIFT; | 
|  | 4854 | for (i = 1; i < count; i++) { | 
|  | 4855 | ret = radix_tree_insert(&iter->radix, idx + i, | 
|  | 4856 | (void *)exception); | 
|  | 4857 | if (ret && ret != -EEXIST) | 
|  | 4858 | goto scan; | 
|  | 4859 | } | 
|  | 4860 |  | 
|  | 4861 | idx += count; | 
|  | 4862 | sg = ____sg_next(sg); | 
|  | 4863 | count = __sg_page_count(sg); | 
|  | 4864 | } | 
|  | 4865 |  | 
|  | 4866 | scan: | 
|  | 4867 | iter->sg_pos = sg; | 
|  | 4868 | iter->sg_idx = idx; | 
|  | 4869 |  | 
|  | 4870 | mutex_unlock(&iter->lock); | 
|  | 4871 |  | 
|  | 4872 | if (unlikely(n < idx)) /* insertion completed by another thread */ | 
|  | 4873 | goto lookup; | 
|  | 4874 |  | 
|  | 4875 | /* In case we failed to insert the entry into the radixtree, we need | 
|  | 4876 | * to look beyond the current sg. | 
|  | 4877 | */ | 
|  | 4878 | while (idx + count <= n) { | 
|  | 4879 | idx += count; | 
|  | 4880 | sg = ____sg_next(sg); | 
|  | 4881 | count = __sg_page_count(sg); | 
|  | 4882 | } | 
|  | 4883 |  | 
|  | 4884 | *offset = n - idx; | 
|  | 4885 | return sg; | 
|  | 4886 |  | 
|  | 4887 | lookup: | 
|  | 4888 | rcu_read_lock(); | 
|  | 4889 |  | 
|  | 4890 | sg = radix_tree_lookup(&iter->radix, n); | 
|  | 4891 | GEM_BUG_ON(!sg); | 
|  | 4892 |  | 
|  | 4893 | /* If this index is in the middle of a multi-page sg entry, | 
|  | 4894 | * the radixtree will contain an exceptional entry that points | 
|  | 4895 | * to the start of that range. We will return the pointer to | 
|  | 4896 | * the base page and the offset of this page within the | 
|  | 4897 | * sg entry's range. | 
|  | 4898 | */ | 
|  | 4899 | *offset = 0; | 
|  | 4900 | if (unlikely(radix_tree_exception(sg))) { | 
|  | 4901 | unsigned long base = | 
|  | 4902 | (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT; | 
|  | 4903 |  | 
|  | 4904 | sg = radix_tree_lookup(&iter->radix, base); | 
|  | 4905 | GEM_BUG_ON(!sg); | 
|  | 4906 |  | 
|  | 4907 | *offset = n - base; | 
|  | 4908 | } | 
|  | 4909 |  | 
|  | 4910 | rcu_read_unlock(); | 
|  | 4911 |  | 
|  | 4912 | return sg; | 
|  | 4913 | } | 
|  | 4914 |  | 
|  | 4915 | struct page * | 
|  | 4916 | i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) | 
|  | 4917 | { | 
|  | 4918 | struct scatterlist *sg; | 
|  | 4919 | unsigned int offset; | 
|  | 4920 |  | 
|  | 4921 | GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); | 
|  | 4922 |  | 
|  | 4923 | sg = i915_gem_object_get_sg(obj, n, &offset); | 
|  | 4924 | return nth_page(sg_page(sg), offset); | 
|  | 4925 | } | 
|  | 4926 |  | 
|  | 4927 | /* Like i915_gem_object_get_page(), but mark the returned page dirty */ | 
|  | 4928 | struct page * | 
|  | 4929 | i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, | 
|  | 4930 | unsigned int n) | 
|  | 4931 | { | 
|  | 4932 | struct page *page; | 
|  | 4933 |  | 
|  | 4934 | page = i915_gem_object_get_page(obj, n); | 
| Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 4935 | if (!obj->mm.dirty) | 
| Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 4936 | set_page_dirty(page); | 
|  | 4937 |  | 
|  | 4938 | return page; | 
|  | 4939 | } | 
|  | 4940 |  | 
|  | 4941 | dma_addr_t | 
|  | 4942 | i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, | 
|  | 4943 | unsigned long n) | 
|  | 4944 | { | 
|  | 4945 | struct scatterlist *sg; | 
|  | 4946 | unsigned int offset; | 
|  | 4947 |  | 
|  | 4948 | sg = i915_gem_object_get_sg(obj, n, &offset); | 
|  | 4949 | return sg_dma_address(sg) + (offset << PAGE_SHIFT); | 
|  | 4950 | } |