/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "drm.h"
#include "radeon.h"

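/*
 * radeon_sa_bo_manager_init - create the backing BO and bookkeeping for a
 * sub allocator. The buffer is created in the CPU domain here; it is only
 * pinned into the requested domain by radeon_sa_bo_manager_start().
 */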
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
                              struct radeon_sa_manager *sa_manager,
                              unsigned size, u32 domain)
{
        int r;

        spin_lock_init(&sa_manager->lock);
        sa_manager->bo = NULL;
        sa_manager->size = size;
        sa_manager->domain = domain;
        INIT_LIST_HEAD(&sa_manager->sa_bo);

        r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
        if (r) {
                dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
                return r;
        }

        return r;
}

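/*
 * radeon_sa_bo_manager_fini - tear the sub allocator down. Any sub
 * allocations still on the list are dropped (with a warning) before the
 * backing BO reference is released.
 */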
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
                               struct radeon_sa_manager *sa_manager)
{
        struct radeon_sa_bo *sa_bo, *tmp;

        if (!list_empty(&sa_manager->sa_bo)) {
                dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
        }
        list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
                list_del_init(&sa_bo->list);
        }
        radeon_bo_unref(&sa_manager->bo);
        sa_manager->size = 0;
}

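/*
 * radeon_sa_bo_manager_start - pin the backing BO into the manager's GPU
 * domain and map it for CPU access, making the sub allocator usable.
 */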
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
                               struct radeon_sa_manager *sa_manager)
{
        int r;

        if (sa_manager->bo == NULL) {
                dev_err(rdev->dev, "no bo for sa manager\n");
                return -EINVAL;
        }

        /* map the buffer */
        r = radeon_bo_reserve(sa_manager->bo, false);
        if (r) {
                dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
                return r;
        }
        r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
        if (r) {
                radeon_bo_unreserve(sa_manager->bo);
                dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
                return r;
        }
        r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
        radeon_bo_unreserve(sa_manager->bo);
        return r;
}

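/*
 * radeon_sa_bo_manager_suspend - undo radeon_sa_bo_manager_start(): unmap
 * and unpin the backing BO so the device can be suspended.
 */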
int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
                                 struct radeon_sa_manager *sa_manager)
{
        int r;

        if (sa_manager->bo == NULL) {
                dev_err(rdev->dev, "no bo for sa manager\n");
                return -EINVAL;
        }

        r = radeon_bo_reserve(sa_manager->bo, false);
        if (!r) {
                radeon_bo_kunmap(sa_manager->bo);
                radeon_bo_unpin(sa_manager->bo);
                radeon_bo_unreserve(sa_manager->bo);
        }
        return r;
}

/*
 * The principle is simple: we keep a list of sub allocations in offset
 * order (the first entry has offset == 0, the last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting on
 * each sub object until we reach one for which object_offset +
 * object_size >= alloc_size; that object then becomes the sub object
 * we return.
 *
 * Alignment can't be bigger than the page size.
 */

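/*
 * Illustrative usage sketch (not part of the driver, shown only to make
 * the allocator's contract concrete; the 64 byte size, 16 byte alignment
 * and the "fence" variable below are made up for the example):
 *
 *	struct radeon_sa_bo *sa_bo;
 *	int r;
 *
 *	r = radeon_sa_bo_new(rdev, sa_manager, &sa_bo, 64, 16, true);
 *	if (r)
 *		return r;
 *	... emit GPU commands that use the range
 *	    [sa_bo->soffset, sa_bo->eoffset) of sa_manager->bo ...
 *	radeon_sa_bo_free(rdev, &sa_bo, fence);
 *
 * Passing the fence that protects the GPU work to radeon_sa_bo_free()
 * lets the allocator reclaim the hole lazily once the fence signals.
 */
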
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
        list_del(&sa_bo->list);
        radeon_fence_unref(&sa_bo->fence);
        kfree(sa_bo);
}

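/*
 * radeon_sa_bo_new - allocate a sub buffer of @size bytes aligned to
 * @align. When no hole is found, and @block is true and a not yet
 * signaled sub buffer exists, wait on its fence and retry; otherwise
 * fail with -ENOMEM.
 */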
int radeon_sa_bo_new(struct radeon_device *rdev,
                     struct radeon_sa_manager *sa_manager,
                     struct radeon_sa_bo **sa_bo,
                     unsigned size, unsigned align, bool block)
{
        struct radeon_fence *fence = NULL;
        struct radeon_sa_bo *tmp, *next;
        struct list_head *head;
        unsigned offset = 0, wasted = 0;
        int r;

        BUG_ON(align > RADEON_GPU_PAGE_SIZE);
        BUG_ON(size > sa_manager->size);

        *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
        if (*sa_bo == NULL) {
                return -ENOMEM;
        }

retry:

        spin_lock(&sa_manager->lock);

        /* nothing allocated yet ? place the new object at offset 0 */
        head = sa_manager->sa_bo.prev;
        if (list_empty(&sa_manager->sa_bo)) {
                goto out;
        }

        /* look for a hole big enough */
        offset = 0;
        list_for_each_entry_safe(tmp, next, &sa_manager->sa_bo, list) {
                /* try to reclaim this object if its fence has signaled */
                if (tmp->fence) {
                        if (radeon_fence_signaled(tmp->fence)) {
                                radeon_sa_bo_remove_locked(tmp);
                                continue;
                        } else {
                                fence = tmp->fence;
                        }
                }

                /* room before this object ? */
                if (offset < tmp->soffset && (tmp->soffset - offset) >= size) {
                        head = tmp->list.prev;
                        goto out;
                }
                offset = tmp->eoffset;
                wasted = offset % align;
                if (wasted) {
                        wasted = align - wasted;
                }
                offset += wasted;
        }
        /* room at the end ? */
        head = sa_manager->sa_bo.prev;
        tmp = list_entry(head, struct radeon_sa_bo, list);
        offset = tmp->eoffset;
        wasted = offset % align;
        if (wasted) {
                wasted = align - wasted;
        }
        offset += wasted;
        if ((sa_manager->size - offset) < size) {
                /* failed to find something big enough */
                spin_unlock(&sa_manager->lock);
                if (block && fence) {
                        r = radeon_fence_wait(fence, false);
                        if (r) {
                                kfree(*sa_bo);
                                *sa_bo = NULL;
                                return r;
                        }
                        goto retry;
                }
                kfree(*sa_bo);
                *sa_bo = NULL;
                return -ENOMEM;
        }

out:
        (*sa_bo)->manager = sa_manager;
        (*sa_bo)->soffset = offset;
        (*sa_bo)->eoffset = offset + size;
        list_add(&(*sa_bo)->list, head);
        spin_unlock(&sa_manager->lock);
        return 0;
}

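/*
 * radeon_sa_bo_free - release a sub buffer. If @fence has been emitted,
 * the sub buffer is kept on the list and reclaimed later (by
 * radeon_sa_bo_new()) once the fence has signaled; otherwise it is freed
 * immediately.
 */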
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
                       struct radeon_fence *fence)
{
        struct radeon_sa_manager *sa_manager;

        if (!sa_bo || !*sa_bo)
                return;

        sa_manager = (*sa_bo)->manager;
        spin_lock(&sa_manager->lock);
        if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
                (*sa_bo)->fence = radeon_fence_ref(fence);
        } else {
                radeon_sa_bo_remove_locked(*sa_bo);
        }
        spin_unlock(&sa_manager->lock);
        *sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
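/*
 * radeon_sa_bo_dump_debug_info - debugfs helper that lists every live sub
 * allocation with its offset range, size and, if present, the fence that
 * still protects it.
 */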
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
                                  struct seq_file *m)
{
        struct radeon_sa_bo *i;

        spin_lock(&sa_manager->lock);
        list_for_each_entry(i, &sa_manager->sa_bo, list) {
                seq_printf(m, "[%08x %08x] size %4d (%p)",
                           i->soffset, i->eoffset, i->eoffset - i->soffset, i);
                if (i->fence) {
                        seq_printf(m, " protected by %Ld (%p) on ring %d\n",
                                   i->fence->seq, i->fence, i->fence->ring);
                } else {
                        seq_printf(m, "\n");
                }
        }
        spin_unlock(&sa_manager->lock);
}
#endif