Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright © 2014 Intel Corporation |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice (including the next |
| 12 | * paragraph) shall be included in all copies or substantial portions of the |
| 13 | * Software. |
| 14 | * |
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
| 21 | * IN THE SOFTWARE. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "i915_drv.h" |
Chris Wilson | ed9ddd2 | 2015-04-07 16:20:34 +0100 | [diff] [blame] | 26 | #include "i915_gem_batch_pool.h" |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 27 | |
| 28 | /** |
| 29 | * DOC: batch pool |
| 30 | * |
| 31 | * In order to submit batch buffers as 'secure', the software command parser |
| 32 | * must ensure that a batch buffer cannot be modified after parsing. It does |
| 33 | * this by copying the user provided batch buffer contents to a kernel owned |
| 34 | * buffer from which the hardware will actually execute, and by carefully |
| 35 | * managing the address space bindings for such buffers. |
| 36 | * |
| 37 | * The batch pool framework provides a mechanism for the driver to manage a |
| 38 | * set of scratch buffers to use for this purpose. The framework can be |
| 39 | * extended to support other use cases should they arise. |
| 40 | */ |
| 41 | |
| 42 | /** |
| 43 | * i915_gem_batch_pool_init() - initialize a batch buffer pool |
Chris Wilson | 115003e9 | 2016-08-04 16:32:19 +0100 | [diff] [blame] | 44 | * @engine: the associated request submission engine |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 45 | * @pool: the batch buffer pool |
| 46 | */ |
Chris Wilson | 115003e9 | 2016-08-04 16:32:19 +0100 | [diff] [blame] | 47 | void i915_gem_batch_pool_init(struct intel_engine_cs *engine, |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 48 | struct i915_gem_batch_pool *pool) |
| 49 | { |
Chris Wilson | 8d9d574 | 2015-04-07 16:20:38 +0100 | [diff] [blame] | 50 | int n; |
| 51 | |
Chris Wilson | 115003e9 | 2016-08-04 16:32:19 +0100 | [diff] [blame] | 52 | pool->engine = engine; |
Chris Wilson | 8d9d574 | 2015-04-07 16:20:38 +0100 | [diff] [blame] | 53 | |
| 54 | for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) |
| 55 | INIT_LIST_HEAD(&pool->cache_list[n]); |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 56 | } |
| 57 | |
| 58 | /** |
| 59 | * i915_gem_batch_pool_fini() - clean up a batch buffer pool |
| 60 | * @pool: the pool to clean up |
| 61 | * |
| 62 | * Note: Callers must hold the struct_mutex. |
| 63 | */ |
| 64 | void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool) |
| 65 | { |
Chris Wilson | 8d9d574 | 2015-04-07 16:20:38 +0100 | [diff] [blame] | 66 | int n; |
| 67 | |
Chris Wilson | 115003e9 | 2016-08-04 16:32:19 +0100 | [diff] [blame] | 68 | lockdep_assert_held(&pool->engine->i915->drm.struct_mutex); |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 69 | |
Chris Wilson | 8d9d574 | 2015-04-07 16:20:38 +0100 | [diff] [blame] | 70 | for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { |
Chris Wilson | f67cbce | 2016-07-26 12:01:53 +0100 | [diff] [blame] | 71 | struct drm_i915_gem_object *obj, *next; |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 72 | |
Chris Wilson | f67cbce | 2016-07-26 12:01:53 +0100 | [diff] [blame] | 73 | list_for_each_entry_safe(obj, next, |
| 74 | &pool->cache_list[n], |
| 75 | batch_pool_link) |
Chris Wilson | f8c417c | 2016-07-20 13:31:53 +0100 | [diff] [blame] | 76 | i915_gem_object_put(obj); |
Chris Wilson | f67cbce | 2016-07-26 12:01:53 +0100 | [diff] [blame] | 77 | |
| 78 | INIT_LIST_HEAD(&pool->cache_list[n]); |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 79 | } |
| 80 | } |
| 81 | |
| 82 | /** |
Chris Wilson | de4e783 | 2015-04-07 16:20:35 +0100 | [diff] [blame] | 83 | * i915_gem_batch_pool_get() - allocate a buffer from the pool |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 84 | * @pool: the batch buffer pool |
| 85 | * @size: the minimum desired size of the returned buffer |
| 86 | * |
Chris Wilson | de4e783 | 2015-04-07 16:20:35 +0100 | [diff] [blame] | 87 | * Returns an inactive buffer from @pool with at least @size bytes, |
| 88 | * with the pages pinned. The caller must i915_gem_object_unpin_pages() |
| 89 | * on the returned object. |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 90 | * |
| 91 | * Note: Callers must hold the struct_mutex |
| 92 | * |
Chris Wilson | de4e783 | 2015-04-07 16:20:35 +0100 | [diff] [blame] | 93 | * Return: the buffer object or an error pointer |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 94 | */ |
| 95 | struct drm_i915_gem_object * |
| 96 | i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, |
| 97 | size_t size) |
| 98 | { |
| 99 | struct drm_i915_gem_object *obj = NULL; |
| 100 | struct drm_i915_gem_object *tmp, *next; |
Chris Wilson | 8d9d574 | 2015-04-07 16:20:38 +0100 | [diff] [blame] | 101 | struct list_head *list; |
| 102 | int n; |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 103 | |
Chris Wilson | 115003e9 | 2016-08-04 16:32:19 +0100 | [diff] [blame] | 104 | lockdep_assert_held(&pool->engine->i915->drm.struct_mutex); |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 105 | |
Chris Wilson | 8d9d574 | 2015-04-07 16:20:38 +0100 | [diff] [blame] | 106 | /* Compute a power-of-two bucket, but throw everything greater than |
| 107 | * 16KiB into the same bucket: i.e. the the buckets hold objects of |
| 108 | * (1 page, 2 pages, 4 pages, 8+ pages). |
| 109 | */ |
| 110 | n = fls(size >> PAGE_SHIFT) - 1; |
| 111 | if (n >= ARRAY_SIZE(pool->cache_list)) |
| 112 | n = ARRAY_SIZE(pool->cache_list) - 1; |
| 113 | list = &pool->cache_list[n]; |
| 114 | |
| 115 | list_for_each_entry_safe(tmp, next, list, batch_pool_link) { |
Chris Wilson | 06fbca7 | 2015-04-07 16:20:36 +0100 | [diff] [blame] | 116 | /* The batches are strictly LRU ordered */ |
Chris Wilson | 115003e9 | 2016-08-04 16:32:19 +0100 | [diff] [blame] | 117 | if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id], |
| 118 | &tmp->base.dev->struct_mutex)) |
Chris Wilson | 06fbca7 | 2015-04-07 16:20:36 +0100 | [diff] [blame] | 119 | break; |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 120 | |
| 121 | /* While we're looping, do some clean up */ |
| 122 | if (tmp->madv == __I915_MADV_PURGED) { |
Chris Wilson | 8d9d574 | 2015-04-07 16:20:38 +0100 | [diff] [blame] | 123 | list_del(&tmp->batch_pool_link); |
Chris Wilson | f8c417c | 2016-07-20 13:31:53 +0100 | [diff] [blame] | 124 | i915_gem_object_put(tmp); |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 125 | continue; |
| 126 | } |
| 127 | |
Chris Wilson | 8d9d574 | 2015-04-07 16:20:38 +0100 | [diff] [blame] | 128 | if (tmp->base.size >= size) { |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 129 | obj = tmp; |
| 130 | break; |
| 131 | } |
| 132 | } |
| 133 | |
Chris Wilson | de4e783 | 2015-04-07 16:20:35 +0100 | [diff] [blame] | 134 | if (obj == NULL) { |
| 135 | int ret; |
| 136 | |
Chris Wilson | 115003e9 | 2016-08-04 16:32:19 +0100 | [diff] [blame] | 137 | obj = i915_gem_object_create(&pool->engine->i915->drm, size); |
Chris Wilson | fe3db79 | 2016-04-25 13:32:13 +0100 | [diff] [blame] | 138 | if (IS_ERR(obj)) |
| 139 | return obj; |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 140 | |
Chris Wilson | de4e783 | 2015-04-07 16:20:35 +0100 | [diff] [blame] | 141 | ret = i915_gem_object_get_pages(obj); |
| 142 | if (ret) |
| 143 | return ERR_PTR(ret); |
| 144 | |
| 145 | obj->madv = I915_MADV_DONTNEED; |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 146 | } |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 147 | |
Chris Wilson | 8d9d574 | 2015-04-07 16:20:38 +0100 | [diff] [blame] | 148 | list_move_tail(&obj->batch_pool_link, list); |
Chris Wilson | de4e783 | 2015-04-07 16:20:35 +0100 | [diff] [blame] | 149 | i915_gem_object_pin_pages(obj); |
Brad Volkin | 493018d | 2014-12-11 12:13:08 -0800 | [diff] [blame] | 150 | return obj; |
| 151 | } |