/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_dma.h"

#define USE_REFCNT (dev_priv->card_type >= NV_10)

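/* A fence marks a point in a channel's command stream.  It holds a
 * reference while it sits on the channel's pending list and is flagged
 * signalled once the GPU reports that its sequence number has passed. */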
struct nouveau_fence {
	struct nouveau_channel *channel;
	struct kref refcount;
	struct list_head entry;

	uint32_t sequence;
	bool signalled;
};

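/* Sync objects are passed around as void *; recover our fence type. */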
static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
	return (struct nouveau_fence *)sync_obj;
}

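/* kref release callback: frees the fence once the last reference drops. */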
static void
nouveau_fence_del(struct kref *ref)
{
	struct nouveau_fence *fence =
		container_of(ref, struct nouveau_fence, refcount);

	kfree(fence);
}

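/* Retire fences the GPU has completed.  On NV10+ the last completed
 * sequence is read from the channel's ref-count register; older cards
 * rely on the sequence recorded by the software-method IRQ handler. */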
void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct list_head *entry, *tmp;
	struct nouveau_fence *fence;
	uint32_t sequence;

	spin_lock(&chan->fence.lock);

	if (USE_REFCNT)
		sequence = nvchan_rd32(chan, 0x48);
	else
		sequence = atomic_read(&chan->fence.last_sequence_irq);

	if (chan->fence.sequence_ack == sequence)
		goto out;
	chan->fence.sequence_ack = sequence;

	/* Signal and retire every pending fence up to and including the
	 * acknowledged sequence. */
	list_for_each_safe(entry, tmp, &chan->fence.pending) {
		fence = list_entry(entry, struct nouveau_fence, entry);

		sequence = fence->sequence;
		fence->signalled = true;
		list_del(&fence->entry);
		kref_put(&fence->refcount, nouveau_fence_del);

		if (sequence == chan->fence.sequence_ack)
			break;
	}
out:
	spin_unlock(&chan->fence.lock);
}

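/* Allocate a fence for a channel, optionally emitting it immediately.
 * On error the fence is dropped and *pfence is set to NULL. */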
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
		  bool emit)
{
	struct nouveau_fence *fence;
	int ret = 0;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	kref_init(&fence->refcount);
	fence->channel = chan;

	if (emit)
		ret = nouveau_fence_emit(fence);

	if (ret)
		nouveau_fence_unref((void *)&fence);
	*pfence = fence;
	return ret;
}

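/* Return the channel a fence belongs to, or NULL for a NULL fence. */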
struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
	return fence ? fence->channel : NULL;
}

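/* Assign the fence the channel's next sequence number, queue it on the
 * pending list, and write the sequence to the hardware via the software
 * object, so nouveau_fence_update() can observe its completion. */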
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
	struct nouveau_channel *chan = fence->channel;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		/* The sequence counter is about to wrap onto the last
		 * acknowledged value; retire what the GPU has already
		 * completed before handing out a new number. */
		nouveau_fence_update(chan);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;

	kref_get(&fence->refcount);
	spin_lock(&chan->fence.lock);
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock(&chan->fence.lock);

	BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
	OUT_RING(chan, fence->sequence);
	FIRE_RING(chan);

	return 0;
}

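/* The entry points below operate on opaque sync-object pointers, matching
 * the shape of TTM's sync-object hooks. */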
void
nouveau_fence_unref(void **sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(*sync_obj);

	if (fence)
		kref_put(&fence->refcount, nouveau_fence_del);
	*sync_obj = NULL;
}

void *
nouveau_fence_ref(void *sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);

	kref_get(&fence->refcount);
	return sync_obj;
}

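/* Non-blocking check: returns true once the fence's sequence has been
 * retired, refreshing the channel's completion state as a side effect. */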
bool
nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);
	struct nouveau_channel *chan = fence->channel;

	if (fence->signalled)
		return true;

	nouveau_fence_update(chan);
	return fence->signalled;
}

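/* Poll for the fence to signal, optionally sleeping between polls and
 * optionally interruptible, giving up with -EBUSY after three seconds. */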
int
nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	unsigned long timeout = jiffies + (3 * DRM_HZ);
	int ret = 0;

	while (1) {
		if (nouveau_fence_signalled(sync_obj, sync_arg))
			break;

		if (time_after_eq(jiffies, timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE
					 : TASK_UNINTERRUPTIBLE);
		/* When allowed to be lazy, sleep a tick between polls
		 * instead of busy-waiting. */
		if (lazy)
			schedule_timeout(1);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);

	return ret;
}

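/* No-op; the sync-object interface expects a flush hook, but fences are
 * emitted straight to the ring as they are created. */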
int
nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

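/* Initialise per-channel fence bookkeeping. */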
int
nouveau_fence_init(struct nouveau_channel *chan)
{
	INIT_LIST_HEAD(&chan->fence.pending);
	spin_lock_init(&chan->fence.lock);
	atomic_set(&chan->fence.last_sequence_irq, 0);
	return 0;
}

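/* Channel teardown: force-signal and drop every fence still pending. */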
void
nouveau_fence_fini(struct nouveau_channel *chan)
{
	struct list_head *entry, *tmp;
	struct nouveau_fence *fence;

	list_for_each_safe(entry, tmp, &chan->fence.pending) {
		fence = list_entry(entry, struct nouveau_fence, entry);

		fence->signalled = true;
		list_del(&fence->entry);
		kref_put(&fence->refcount, nouveau_fence_del);
	}
}