blob: 10a44a1d44fca818c83341cc0268a3c52eda93ca [file] [log] [blame]
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24
Ben Skeggs51beb422011-07-05 10:33:08 +100025#include <linux/dma-mapping.h>
Ben Skeggs26f6d882011-07-04 16:25:18 +100026#include "drmP.h"
27
28#include "nouveau_drv.h"
29#include "nouveau_connector.h"
30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h"
32
/* Per-device state for the NVD0 (Fermi display class) engine. */
struct nvd0_display {
	/* hash table + dma objects for the display engine (see
	 * nvd0_display_create(); its address is programmed into 0x610010)
	 */
	struct nouveau_gpuobj *mem;
	/* evo channel state; only the master channel (index 0) exists so far */
	struct {
		dma_addr_t handle;	/* bus address of push buffer (-> 0x610494) */
		u32 *ptr;		/* cpu mapping of the PAGE_SIZE push buffer */
	} evo[1];
};
40
41static struct nvd0_display *
42nvd0_display(struct drm_device *dev)
43{
44 struct drm_nouveau_private *dev_priv = dev->dev_private;
45 return dev_priv->engine.display.priv;
46}
47
Ben Skeggs51beb422011-07-05 10:33:08 +100048static int
49evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
50{
51 int ret = 0;
52 nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
53 nv_wr32(dev, 0x610704 + (id * 0x10), data);
54 nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
55 if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
56 ret = -EBUSY;
57 nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
58 return ret;
59}
60
/* Reserve space for 'nr' dwords in evo channel 'id''s push buffer.
 * Returns a cpu pointer at which the caller can emit methods (with
 * evo_mthd()/evo_data()) before submitting via evo_kick(), or NULL if
 * the channel's DMA engine stalls while wrapping.
 */
static u32 *
evo_wait(struct drm_device *dev, int id, int nr)
{
	struct nvd0_display *disp = nvd0_display(dev);
	/* current PUT offset, converted from bytes to dwords */
	u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4;

	/* not enough room before the end of the page: emit 0x20000000
	 * (presumably a jump-back-to-start command — matches the PUT reset
	 * below; TODO confirm against hw docs), reset PUT, and wait for
	 * GET (0x640004) to return to 0 before reusing the buffer
	 */
	if (put + nr >= (PAGE_SIZE / 4)) {
		disp->evo[id].ptr[put] = 0x20000000;

		nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000);
		if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
			NV_ERROR(dev, "evo %d dma stalled\n", id);
			return NULL;
		}

		put = 0;
	}

	return disp->evo[id].ptr + put;
}
81
82static void
83evo_kick(u32 *push, struct drm_device *dev, int id)
84{
85 struct nvd0_display *disp = nvd0_display(dev);
86 nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
87}
88
/* Emit a method header ((size << 18) | method) or a data word into the
 * push buffer, post-incrementing the cursor 'p' obtained from evo_wait().
 * Used as expression statements; 'p' must be a plain lvalue.
 */
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
#define evo_data(p,d) *((p)++) = (d)
91
/******************************************************************************
 * DAC
 *****************************************************************************/

/******************************************************************************
 * SOR
 *****************************************************************************/

/******************************************************************************
 * IRQ
 *****************************************************************************/
/* Top-level PDISP interrupt handler (registered on irq source 26).
 * Decodes 0x610088, acks what it recognises, and logs anything left.
 */
static void
nvd0_display_intr(struct drm_device *dev)
{
	u32 intr = nv_rd32(dev, 0x610088);

	/* bit 1: evo channel exception — find the lowest pending channel,
	 * dump the offending method/data, then ack and resume it
	 */
	if (intr & 0x00000002) {
		u32 stat = nv_rd32(dev, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0) {
			u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12));
			u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12));
			u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12));

			NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
				     "0x%08x 0x%08x\n",
				chid, (mthd & 0x0000ffc), data, mthd, unkn);
			/* 0x90000000 presumably means "clear exception and
			 * restart the channel" — TODO confirm
			 */
			nv_wr32(dev, 0x61009c, (1 << chid));
			nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000);
		}

		intr &= ~0x00000002;
	}

	/* bits 24/25: ack-only status registers; their exact meaning is not
	 * established by anything visible here
	 */
	if (intr & 0x01000000) {
		u32 stat = nv_rd32(dev, 0x6100bc);
		nv_wr32(dev, 0x6100bc, stat);
		intr &= ~0x01000000;
	}

	if (intr & 0x02000000) {
		u32 stat = nv_rd32(dev, 0x6108bc);
		nv_wr32(dev, 0x6108bc, stat);
		intr &= ~0x02000000;
	}

	/* anything still set is a source we don't handle yet */
	if (intr)
		NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr);
}
Ben Skeggs26f6d882011-07-04 16:25:18 +1000141
/******************************************************************************
 * Init
 *****************************************************************************/
/* Quiesce the display engine: shut down any active channels (the reverse
 * of nvd0_display_init()) and mask their interrupts.  Safe to call on a
 * partially-initialised engine — every step is gated on a channel-active
 * register read.
 */
static void
nvd0_display_fini(struct drm_device *dev)
{
	int i;

	/* fini cursor channels (13 and 14), skipping any that aren't enabled */
	for (i = 14; i >= 13; i--) {
		if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
			continue;

		/* disable the channel, wait for bit 16 to drop, then mask
		 * its bits in 0x610090/0x6100a0
		 */
		nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
		nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
		nv_mask(dev, 0x610090, 1 << i, 0x00000000);
		nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
	}

	/* fini the master (evo channel 0), only if it was brought up */
	if (nv_rd32(dev, 0x610490) & 0x00000010) {
		nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
		nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
		nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
		nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
		nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
	}
}
170
/* Bring up the display engine: take ownership from whatever ran before
 * (vbios/firmware — presumed; TODO confirm the 0x6100ac/0x6194e8
 * handshake semantics), start the evo master channel, then the two
 * cursor channels.  Returns 0 on success or -EBUSY if any channel fails
 * to come up.
 */
int
nvd0_display_init(struct drm_device *dev)
{
	struct nvd0_display *disp = nvd0_display(dev);
	int i;

	/* ack the handoff flag and release 0x6194e8 bit 0, then wait for
	 * bit 1 to clear before proceeding
	 */
	if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
		nv_wr32(dev, 0x6100ac, 0x00000100);
		nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000);
		if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) {
			NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n",
				 nv_rd32(dev, 0x6194e8));
			return -EBUSY;
		}
	}

	/* point the engine at our hash table / dma objects */
	nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9);

	/* init master (evo channel 0): program the push buffer bus address,
	 * reset PUT, enable the channel and wait for bit 31 to drop
	 */
	nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
	nv_wr32(dev, 0x610498, 0x00010000);
	nv_wr32(dev, 0x61049c, 0x00000000);
	nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
	nv_wr32(dev, 0x640000, 0x00000000);
	nv_wr32(dev, 0x610490, 0x01000013);
	if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "PDISP: master 0x%08x\n",
			 nv_rd32(dev, 0x610490));
		return -EBUSY;
	}
	/* unmask the master channel's interrupt bits */
	nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
	nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);

	/* init cursor channels (13 and 14) and unmask their interrupts */
	for (i = 13; i <= 14; i++) {
		nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
		if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
			NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
				 nv_rd32(dev, 0x610490 + (i * 0x10)));
			return -EBUSY;
		}

		nv_mask(dev, 0x610090, 1 << i, 1 << i);
		nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
	}

	return 0;
}
219
220void
221nvd0_display_destroy(struct drm_device *dev)
222{
223 struct drm_nouveau_private *dev_priv = dev->dev_private;
224 struct nvd0_display *disp = nvd0_display(dev);
Ben Skeggs51beb422011-07-05 10:33:08 +1000225 struct pci_dev *pdev = dev->pdev;
Ben Skeggs26f6d882011-07-04 16:25:18 +1000226
227 nvd0_display_fini(dev);
228
Ben Skeggs51beb422011-07-05 10:33:08 +1000229 pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
Ben Skeggs26f6d882011-07-04 16:25:18 +1000230 nouveau_gpuobj_ref(NULL, &disp->mem);
Ben Skeggs46005222011-07-05 11:01:13 +1000231 nouveau_irq_unregister(dev, 26);
Ben Skeggs51beb422011-07-05 10:33:08 +1000232
233 dev_priv->engine.display.priv = NULL;
Ben Skeggs26f6d882011-07-04 16:25:18 +1000234 kfree(disp);
235}
236
/* Allocate and initialise the nvd0 display engine state.  Returns 0 on
 * success; on any failure all partially-acquired resources are released
 * via nvd0_display_destroy() and a negative errno is returned.
 */
int
nvd0_display_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev->pdev;
	struct nvd0_display *disp;
	int ret;

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;
	/* publish early: nvd0_display() (used by the irq handler and the
	 * error path below) resolves through engine.display.priv
	 */
	dev_priv->engine.display.priv = disp;

	/* setup interrupt handling */
	nouveau_irq_register(dev, 26, nvd0_display_intr);

	/* hash table and dma objects for the memory areas we care about */
	ret = nouveau_gpuobj_new(dev, NULL, 4 * 1024, 0x1000, 0, &disp->mem);
	if (ret)
		goto out;

	/* push buffers for evo channels */
	disp->evo[0].ptr =
		pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
	if (!disp->evo[0].ptr) {
		ret = -ENOMEM;
		goto out;
	}

	ret = nvd0_display_init(dev);
	if (ret)
		goto out;

	/* NOTE(review): destroy() is called with 'disp' possibly only
	 * partially constructed — it must cope with NULL members
	 */
out:
	if (ret)
		nvd0_display_destroy(dev);
	return ret;
}