blob: cd827cda64e4e4c56e1c12cc34edfb0b98b75447 [file] [log] [blame]
Ben Skeggs26f6d882011-07-04 16:25:18 +10001/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
Ben Skeggs51beb422011-07-05 10:33:08 +100025#include <linux/dma-mapping.h>
Ben Skeggs26f6d882011-07-04 16:25:18 +100026#include "drmP.h"
27
28#include "nouveau_drv.h"
29#include "nouveau_connector.h"
30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h"
32
/* Per-device display engine state, hung off dev_priv->engine.display.priv
 * (see nvd0_display_create()/nvd0_display_destroy()). */
struct nvd0_display {
	/* hash table and dma objects for the memory areas we care about
	 * (allocated via nouveau_gpuobj_new() in nvd0_display_create()) */
	struct nouveau_gpuobj *mem;
	struct {
		dma_addr_t handle;	/* bus address of the push buffer page */
		u32 *ptr;		/* cpu mapping of the push buffer page */
	} evo[1];			/* evo channel state; only the master channel so far */
};
40
41static struct nvd0_display *
42nvd0_display(struct drm_device *dev)
43{
44 struct drm_nouveau_private *dev_priv = dev->dev_private;
45 return dev_priv->engine.display.priv;
46}
47
Ben Skeggs51beb422011-07-05 10:33:08 +100048static int
49evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
50{
51 int ret = 0;
52 nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
53 nv_wr32(dev, 0x610704 + (id * 0x10), data);
54 nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
55 if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
56 ret = -EBUSY;
57 nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
58 return ret;
59}
60
/* Return a pointer into evo channel 'id's push buffer with room for at
 * least 'nr' words before the end of the page.  If there isn't enough
 * space, emit a command to restart from the top of the buffer, wait for
 * the hardware to drain, and hand back the start of the page instead.
 * Returns NULL if the channel's DMA engine stalls. */
static u32 *
evo_wait(struct drm_device *dev, int id, int nr)
{
	struct nvd0_display *disp = nvd0_display(dev);
	/* current PUT offset for this channel, converted bytes -> words */
	u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4;

	if (put + nr >= (PAGE_SIZE / 4)) {
		/* NOTE(review): 0x20000000 appears to be a "jump to start"
		 * command consumed by the hardware — confirm against docs */
		disp->evo[id].ptr[put] = 0x20000000;

		/* reset PUT to the top of the buffer and wait for the
		 * hardware (presumably GET at 0x640004) to reach zero */
		nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000);
		if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
			NV_ERROR(dev, "evo %d dma stalled\n", id);
			return NULL;
		}

		put = 0;
	}

	return disp->evo[id].ptr + put;
}
81
82static void
83evo_kick(u32 *push, struct drm_device *dev, int id)
84{
85 struct nvd0_display *disp = nvd0_display(dev);
86 nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
87}
88
/* Append a method header (word count in bits 18+, method in the low bits)
 * to a push buffer cursor obtained from evo_wait(). */
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
/* Append one data word to the push buffer. */
#define evo_data(p,d) *((p)++) = (d)
91
Ben Skeggs26f6d882011-07-04 16:25:18 +100092/******************************************************************************
93 * DAC
94 *****************************************************************************/
95
96/******************************************************************************
97 * SOR
98 *****************************************************************************/
99
100/******************************************************************************
101 * IRQ
102 *****************************************************************************/
103
104/******************************************************************************
105 * Init
106 *****************************************************************************/
/* Tear down the display engine's channels: stop the two cursor channels
 * (13 and 14) and the master evo channel, and mask their interrupt bits.
 * Safe to call on a partially initialised engine — each step is gated on
 * the channel actually being active. */
static void
nvd0_display_fini(struct drm_device *dev)
{
	int i;

	/* fini cursors */
	for (i = 14; i >= 13; i--) {
		/* skip cursor channels that were never brought up */
		if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
			continue;

		nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
		nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
		/* NOTE(review): 0x610090/0x6100a0 look like per-channel
		 * interrupt enable masks (one bit per channel) — confirm */
		nv_mask(dev, 0x610090, 1 << i, 0x00000000);
		nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
	}

	/* fini master */
	if (nv_rd32(dev, 0x610490) & 0x00000010) {
		nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
		nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
		/* wait for the channel to report idle before unmasking */
		nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
		nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
		nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
	}
}
132
/* Bring up the display engine: acknowledge/clear any pending PDISP error
 * state, point the hardware at the display hash table, then start the
 * master evo channel followed by both cursor channels (13 and 14).
 * Returns 0 on success, -EBUSY if the hardware fails to respond. */
int
nvd0_display_init(struct drm_device *dev)
{
	struct nvd0_display *disp = nvd0_display(dev);
	int i;

	/* NOTE(review): bit 8 of 0x6100ac appears to flag an error/reset
	 * condition that must be acked (write-1-to-clear) and waited out
	 * via 0x6194e8 before the engine can be used — confirm */
	if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
		nv_wr32(dev, 0x6100ac, 0x00000100);
		nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000);
		if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) {
			NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n",
				 nv_rd32(dev, 0x6194e8));
			return -EBUSY;
		}
	}

	/* point the display engine at the hash table / dma objects */
	nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9);

	/* init master: hook up the push buffer and enable the channel */
	nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
	nv_wr32(dev, 0x610498, 0x00010000);
	nv_wr32(dev, 0x61049c, 0x00000000);
	nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
	nv_wr32(dev, 0x640000, 0x00000000);
	nv_wr32(dev, 0x610490, 0x01000013);
	if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "PDISP: master 0x%08x\n",
			 nv_rd32(dev, 0x610490));
		return -EBUSY;
	}
	nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
	nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);

	/* init cursors: enable each channel and unmask its interrupt bit */
	for (i = 13; i <= 14; i++) {
		nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
		if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
			NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
				 nv_rd32(dev, 0x610490 + (i * 0x10)));
			return -EBUSY;
		}

		nv_mask(dev, 0x610090, 1 << i, 1 << i);
		nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
	}

	return 0;
}
181
182void
183nvd0_display_destroy(struct drm_device *dev)
184{
185 struct drm_nouveau_private *dev_priv = dev->dev_private;
186 struct nvd0_display *disp = nvd0_display(dev);
Ben Skeggs51beb422011-07-05 10:33:08 +1000187 struct pci_dev *pdev = dev->pdev;
Ben Skeggs26f6d882011-07-04 16:25:18 +1000188
189 nvd0_display_fini(dev);
190
Ben Skeggs51beb422011-07-05 10:33:08 +1000191 pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
Ben Skeggs26f6d882011-07-04 16:25:18 +1000192 nouveau_gpuobj_ref(NULL, &disp->mem);
Ben Skeggs51beb422011-07-05 10:33:08 +1000193
194 dev_priv->engine.display.priv = NULL;
Ben Skeggs26f6d882011-07-04 16:25:18 +1000195 kfree(disp);
196}
197
198int
199nvd0_display_create(struct drm_device *dev)
200{
201 struct drm_nouveau_private *dev_priv = dev->dev_private;
Ben Skeggs51beb422011-07-05 10:33:08 +1000202 struct pci_dev *pdev = dev->pdev;
Ben Skeggs26f6d882011-07-04 16:25:18 +1000203 struct nvd0_display *disp;
204 int ret;
205
206 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
207 if (!disp)
208 return -ENOMEM;
209 dev_priv->engine.display.priv = disp;
210
Ben Skeggs51beb422011-07-05 10:33:08 +1000211 /* hash table and dma objects for the memory areas we care about */
212 ret = nouveau_gpuobj_new(dev, NULL, 4 * 1024, 0x1000, 0, &disp->mem);
Ben Skeggs26f6d882011-07-04 16:25:18 +1000213 if (ret)
214 goto out;
215
Ben Skeggs51beb422011-07-05 10:33:08 +1000216 /* push buffers for evo channels */
217 disp->evo[0].ptr =
218 pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
219 if (!disp->evo[0].ptr) {
220 ret = -ENOMEM;
221 goto out;
222 }
223
Ben Skeggs26f6d882011-07-04 16:25:18 +1000224 ret = nvd0_display_init(dev);
225 if (ret)
226 goto out;
227
228out:
229 if (ret)
230 nvd0_display_destroy(dev);
231 return ret;
232}