blob: 7bf2f3f2968e64fa62d94b4dd20540da6538fb09 [file] [log] [blame]
Brad Volkin493018d2014-12-11 12:13:08 -08001/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "i915_drv.h"
Chris Wilsoned9ddd22015-04-07 16:20:34 +010026#include "i915_gem_batch_pool.h"
Brad Volkin493018d2014-12-11 12:13:08 -080027
28/**
29 * DOC: batch pool
30 *
31 * In order to submit batch buffers as 'secure', the software command parser
32 * must ensure that a batch buffer cannot be modified after parsing. It does
33 * this by copying the user provided batch buffer contents to a kernel owned
34 * buffer from which the hardware will actually execute, and by carefully
35 * managing the address space bindings for such buffers.
36 *
37 * The batch pool framework provides a mechanism for the driver to manage a
38 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
40 */
41
42/**
43 * i915_gem_batch_pool_init() - initialize a batch buffer pool
44 * @dev: the drm device
45 * @pool: the batch buffer pool
46 */
47void i915_gem_batch_pool_init(struct drm_device *dev,
48 struct i915_gem_batch_pool *pool)
49{
Chris Wilson8d9d5742015-04-07 16:20:38 +010050 int n;
51
Brad Volkin493018d2014-12-11 12:13:08 -080052 pool->dev = dev;
Chris Wilson8d9d5742015-04-07 16:20:38 +010053
54 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
55 INIT_LIST_HEAD(&pool->cache_list[n]);
Brad Volkin493018d2014-12-11 12:13:08 -080056}
57
58/**
59 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
60 * @pool: the pool to clean up
61 *
62 * Note: Callers must hold the struct_mutex.
63 */
64void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
65{
Chris Wilson8d9d5742015-04-07 16:20:38 +010066 int n;
67
Brad Volkin493018d2014-12-11 12:13:08 -080068 WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
69
Chris Wilson8d9d5742015-04-07 16:20:38 +010070 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
71 while (!list_empty(&pool->cache_list[n])) {
72 struct drm_i915_gem_object *obj =
73 list_first_entry(&pool->cache_list[n],
74 struct drm_i915_gem_object,
75 batch_pool_link);
Brad Volkin493018d2014-12-11 12:13:08 -080076
Chris Wilson8d9d5742015-04-07 16:20:38 +010077 list_del(&obj->batch_pool_link);
78 drm_gem_object_unreference(&obj->base);
79 }
Brad Volkin493018d2014-12-11 12:13:08 -080080 }
81}
82
83/**
Chris Wilsonde4e7832015-04-07 16:20:35 +010084 * i915_gem_batch_pool_get() - allocate a buffer from the pool
Brad Volkin493018d2014-12-11 12:13:08 -080085 * @pool: the batch buffer pool
86 * @size: the minimum desired size of the returned buffer
87 *
Chris Wilsonde4e7832015-04-07 16:20:35 +010088 * Returns an inactive buffer from @pool with at least @size bytes,
89 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
90 * on the returned object.
Brad Volkin493018d2014-12-11 12:13:08 -080091 *
92 * Note: Callers must hold the struct_mutex
93 *
Chris Wilsonde4e7832015-04-07 16:20:35 +010094 * Return: the buffer object or an error pointer
Brad Volkin493018d2014-12-11 12:13:08 -080095 */
96struct drm_i915_gem_object *
97i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
98 size_t size)
99{
100 struct drm_i915_gem_object *obj = NULL;
101 struct drm_i915_gem_object *tmp, *next;
Chris Wilson8d9d5742015-04-07 16:20:38 +0100102 struct list_head *list;
103 int n;
Brad Volkin493018d2014-12-11 12:13:08 -0800104
105 WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
106
Chris Wilson8d9d5742015-04-07 16:20:38 +0100107 /* Compute a power-of-two bucket, but throw everything greater than
108 * 16KiB into the same bucket: i.e. the the buckets hold objects of
109 * (1 page, 2 pages, 4 pages, 8+ pages).
110 */
111 n = fls(size >> PAGE_SHIFT) - 1;
112 if (n >= ARRAY_SIZE(pool->cache_list))
113 n = ARRAY_SIZE(pool->cache_list) - 1;
114 list = &pool->cache_list[n];
115
116 list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
Chris Wilson06fbca72015-04-07 16:20:36 +0100117 /* The batches are strictly LRU ordered */
Brad Volkin493018d2014-12-11 12:13:08 -0800118 if (tmp->active)
Chris Wilson06fbca72015-04-07 16:20:36 +0100119 break;
Brad Volkin493018d2014-12-11 12:13:08 -0800120
121 /* While we're looping, do some clean up */
122 if (tmp->madv == __I915_MADV_PURGED) {
Chris Wilson8d9d5742015-04-07 16:20:38 +0100123 list_del(&tmp->batch_pool_link);
Brad Volkin493018d2014-12-11 12:13:08 -0800124 drm_gem_object_unreference(&tmp->base);
125 continue;
126 }
127
Chris Wilson8d9d5742015-04-07 16:20:38 +0100128 if (tmp->base.size >= size) {
Brad Volkin493018d2014-12-11 12:13:08 -0800129 obj = tmp;
130 break;
131 }
132 }
133
Chris Wilsonde4e7832015-04-07 16:20:35 +0100134 if (obj == NULL) {
135 int ret;
136
Brad Volkin493018d2014-12-11 12:13:08 -0800137 obj = i915_gem_alloc_object(pool->dev, size);
Chris Wilsonde4e7832015-04-07 16:20:35 +0100138 if (obj == NULL)
Brad Volkin493018d2014-12-11 12:13:08 -0800139 return ERR_PTR(-ENOMEM);
140
Chris Wilsonde4e7832015-04-07 16:20:35 +0100141 ret = i915_gem_object_get_pages(obj);
142 if (ret)
143 return ERR_PTR(ret);
144
145 obj->madv = I915_MADV_DONTNEED;
Brad Volkin493018d2014-12-11 12:13:08 -0800146 }
Brad Volkin493018d2014-12-11 12:13:08 -0800147
Chris Wilson8d9d5742015-04-07 16:20:38 +0100148 list_move_tail(&obj->batch_pool_link, list);
Chris Wilsonde4e7832015-04-07 16:20:35 +0100149 i915_gem_object_pin_pages(obj);
Brad Volkin493018d2014-12-11 12:13:08 -0800150 return obj;
151}