/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_pm.h"
#include "nouveau_hw.h"

#define min2(a,b) ((a) < (b) ? (a) : (b))

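/*
 * Read back a single-stage PLL (e.g. the PLL at 0x4008).  With the 27000 kHz
 * reference used below, the output is clk = (ref * N / M) >> P whenever the
 * PLL is enabled (bit 31 of the control register).
 *
 * Illustrative example (made-up register values, not from real hardware):
 * N = 80, M = 2, P = 1 gives 27000 * 80 / 2 = 1080000 kHz, shifted right
 * by one for a final clock of 540000 kHz (540 MHz).
 */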
static u32
read_pll_1(struct drm_device *dev, u32 reg)
{
	u32 ctrl = nv_rd32(dev, reg + 0x00);
	int P = (ctrl & 0x00070000) >> 16;
	int N = (ctrl & 0x0000ff00) >> 8;
	int M = (ctrl & 0x000000ff) >> 0;
	u32 ref = 27000, clk = 0;

	if ((ctrl & 0x80000000) && M)
		clk = ref * N / M;

	return clk >> P;
}

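/*
 * Read back a two-stage PLL (the core PLL at 0x4000, the memory PLL at
 * 0x4020).  The first stage gives ref * N1 / M1; the second stage is only
 * folded in when bit 30 of the control register is set and bit 8 is clear
 * (the 0x40000100 test below), i.e.
 *   clk = (ref * N1 / M1 * N2 / M2) >> P,
 * otherwise
 *   clk = (ref * N1 / M1) >> P.
 */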
static u32
read_pll_2(struct drm_device *dev, u32 reg)
{
	u32 ctrl = nv_rd32(dev, reg + 0x00);
	u32 coef = nv_rd32(dev, reg + 0x04);
	int N2 = (coef & 0xff000000) >> 24;
	int M2 = (coef & 0x00ff0000) >> 16;
	int N1 = (coef & 0x0000ff00) >> 8;
	int M1 = (coef & 0x000000ff) >> 0;
	int P = (ctrl & 0x00070000) >> 16;
	u32 ref = 27000, clk = 0;

	if ((ctrl & 0x80000000) && M1) {
		clk = ref * N1 / M1;
		if ((ctrl & 0x40000100) == 0x40000000) {
			if (M2)
				clk = clk * N2 / M2;
			else
				clk = 0;
		}
	}

	return clk >> P;
}

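/*
 * Map a clock-source selector (as found in the 0x00c040 mux register) to a
 * frequency: source 3 appears to be the core PLL at 0x4000 and source 2 the
 * PLL at 0x4008; any other source is simply reported as 0 here.
 */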
static u32
read_clk(struct drm_device *dev, u32 src)
{
	switch (src) {
	case 3:
		return read_pll_2(dev, 0x004000);
	case 2:
		return read_pll_1(dev, 0x004008);
	default:
		break;
	}

	return 0;
}

int
nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	u32 ctrl = nv_rd32(dev, 0x00c040);

	perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0);
	perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
	perflvl->memory = read_pll_2(dev, 0x4020);
	return 0;
}

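/*
 * Register values pre-computed by nv40_pm_clocks_pre() and written out by
 * nv40_pm_clocks_set(): the 0x00c040 source mux (ctrl), the core PLL at
 * 0x4000/0x4004 (npll_ctrl/npll_coef), the shader PLL at 0x4008 (spll), and
 * the memory PLL at 0x4020/0x4024 (mpll_ctrl/mpll_coef).
 */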
struct nv40_pm_state {
	u32 ctrl;
	u32 npll_ctrl;
	u32 npll_coef;
	u32 spll;
	u32 mpll_ctrl;
	u32 mpll_coef;
};

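/*
 * Fetch the VBIOS limits for the PLL at "reg" and calculate coefficients
 * for the requested clock.  If the target is below VCO1's maximum, the
 * second VCO is disabled (vco2.maxfreq = 0) and N2/M2 are reported as 1/1.
 * nouveau_calc_pll_mnp() is expected to return the achieved frequency, so a
 * return value of 0 is treated as "no usable coefficients" (-ERANGE).
 */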
static int
nv40_calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
	      u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
{
	struct nouveau_pll_vals coef;
	int ret;

	ret = get_pll_limits(dev, reg, pll);
	if (ret)
		return ret;

	if (clk < pll->vco1.maxfreq)
		pll->vco2.maxfreq = 0;

	ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
	if (ret == 0)
		return -ERANGE;

	*N1 = coef.N1;
	*M1 = coef.M1;
	if (N2 && M2) {
		if (pll->vco2.maxfreq) {
			*N2 = coef.N2;
			*M2 = coef.M2;
		} else {
			*N2 = 1;
			*M2 = 1;
		}
	}
	*log2P = coef.log2P;
	return 0;
}

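/*
 * Pre-calculate every register value for a performance level so that
 * nv40_pm_clocks_set() only has to write them out.  If the shader clock
 * differs from the core clock, the PLL at 0x4008 is programmed separately
 * and the 0x00c040 mux value becomes 0x223 (shader source 2); otherwise
 * everything runs from the core PLL and the mux value is 0x333.  A memory
 * frequency of zero leaves mpll_ctrl clear, which nv40_pm_clocks_set()
 * takes as "do not reclock memory".
 */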
void *
nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct nv40_pm_state *info;
	struct pll_lims pll;
	int N1, N2, M1, M2, log2P;
	int ret;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	/* core/geometric clock */
	ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
			    &N1, &M1, &N2, &M2, &log2P);
	if (ret < 0)
		goto out;

	if (N2 == M2) {
		info->npll_ctrl = 0x80000100 | (log2P << 16);
		info->npll_coef = (N1 << 8) | M1;
	} else {
		info->npll_ctrl = 0xc0000000 | (log2P << 16);
		info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
	}

	/* use the second PLL for shader/rop clock, if it differs from core */
	if (perflvl->shader && perflvl->shader != perflvl->core) {
		ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
				    &N1, &M1, NULL, NULL, &log2P);
		if (ret < 0)
			goto out;

		info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
		info->ctrl = 0x00000223;
	} else {
		info->spll = 0x00000000;
		info->ctrl = 0x00000333;
	}

	/* memory clock */
	if (!perflvl->memory) {
		info->mpll_ctrl = 0x00000000;
		goto out;
	}

	ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
			    &N1, &M1, &N2, &M2, &log2P);
	if (ret < 0)
		goto out;

	info->mpll_ctrl = 0x80000000 | (log2P << 16);
	info->mpll_ctrl |= min2(pll.log2p_bias + log2P, pll.max_log2p) << 20;
	if (N2 == M2) {
		info->mpll_ctrl |= 0x00000100;
		info->mpll_coef = (N1 << 8) | M1;
	} else {
		info->mpll_ctrl |= 0x40000000;
		info->mpll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
	}

out:
	if (ret < 0) {
		kfree(info);
		info = ERR_PTR(ret);
	}
	return info;
}

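/*
 * Idle callback for nv_wait_cb(): PGRAPH is considered idle once the two
 * low nibbles of 0x400760 (presumably a put/get pointer pair) match and
 * the status register at 0x400700 reads back zero.
 */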
static bool
nv40_pm_gr_idle(void *data)
{
	struct drm_device *dev = data;

	if ((nv_rd32(dev, 0x400760) & 0x000000f0) >> 4 !=
	    (nv_rd32(dev, 0x400760) & 0x0000000f))
		return false;

	if (nv_rd32(dev, 0x400700))
		return false;

	return true;
}

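/*
 * Apply a pre-computed set of clocks.  The sequence is roughly:
 *  - note which CRTCs are active and save their VGA SR1 value,
 *  - halt PFIFO and wait for PGRAPH to go idle,
 *  - reprogram the engine PLLs and the 0x00c040 source mux,
 *  - if memory is being reclocked: blank the active CRTCs during vblank,
 *    put the DRAM into self-refresh, reprogram the memory PLL copies for
 *    this chipset's partitions, leave self-refresh, run the VBIOS 'M'
 *    table script, then restore CRTC memory access,
 *  - resume PFIFO/PGRAPH whether or not the above succeeded.
 */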
void
nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv40_pm_state *info = pre_state;
	unsigned long flags;
	struct bit_entry M;
	u32 crtc_mask = 0;
	u8 sr1[2];
	int i;

	/* determine which CRTCs are active, fetch VGA_SR1 for each */
	for (i = 0; i < 2; i++) {
		u32 vbl = nv_rd32(dev, 0x600808 + (i * 0x2000));
		u32 cnt = 0;
		do {
			if (vbl != nv_rd32(dev, 0x600808 + (i * 0x2000))) {
				nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
				sr1[i] = nv_rd08(dev, 0x0c03c5 + (i * 0x2000));
				if (!(sr1[i] & 0x20))
					crtc_mask |= (1 << i);
				break;
			}
			udelay(1);
		} while (cnt++ < 32);
	}

	/* halt and idle engines */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
	if (!nv_wait(dev, 0x002500, 0x00000010, 0x00000000))
		goto resume;
	nv_mask(dev, 0x003220, 0x00000001, 0x00000000);
	if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000))
		goto resume;
	nv_mask(dev, 0x003200, 0x00000001, 0x00000000);
	nv04_fifo_cache_pull(dev, false);

	if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
		goto resume;

	/* set engine clocks */
	nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
	nv_wr32(dev, 0x004004, info->npll_coef);
	nv_mask(dev, 0x004000, 0xc0070100, info->npll_ctrl);
	nv_mask(dev, 0x004008, 0xc007ffff, info->spll);
	mdelay(5);
	nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);

	if (!info->mpll_ctrl)
		goto resume;

	/* wait for vblank start on active crtcs, disable memory access */
	for (i = 0; i < 2; i++) {
		if (!(crtc_mask & (1 << i)))
			continue;
		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
		nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
		nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
	}

	/* prepare ram for reclocking */
	nv_wr32(dev, 0x1002d4, 0x00000001); /* precharge */
	nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
	nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
	nv_mask(dev, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
	nv_wr32(dev, 0x1002dc, 0x00000001); /* enable self-refresh */

	/* change the PLL of each memory partition */
	nv_mask(dev, 0x00c040, 0x0000c000, 0x00000000);
	switch (dev_priv->chipset) {
	case 0x40:
	case 0x45:
	case 0x41:
	case 0x42:
	case 0x47:
		nv_mask(dev, 0x004044, 0xc0771100, info->mpll_ctrl);
		nv_mask(dev, 0x00402c, 0xc0771100, info->mpll_ctrl);
		nv_wr32(dev, 0x004048, info->mpll_coef);
		nv_wr32(dev, 0x004030, info->mpll_coef);
		/* fall through */
	case 0x43:
	case 0x49:
	case 0x4b:
		nv_mask(dev, 0x004038, 0xc0771100, info->mpll_ctrl);
		nv_wr32(dev, 0x00403c, info->mpll_coef);
		/* fall through */
	default:
		nv_mask(dev, 0x004020, 0xc0771100, info->mpll_ctrl);
		nv_wr32(dev, 0x004024, info->mpll_coef);
		break;
	}
	udelay(100);
	nv_mask(dev, 0x00c040, 0x0000c000, 0x0000c000);

	/* re-enable normal operation of memory controller */
	nv_wr32(dev, 0x1002dc, 0x00000000);
	nv_mask(dev, 0x100210, 0x80000000, 0x80000000);
	udelay(100);

	/* execute memory reset script from vbios */
	if (!bit_table(dev, 'M', &M))
		nouveau_bios_init_exec(dev, ROM16(M.data[0]));

	/* make sure we're in vblank (hopefully the same one as before), and
	 * then re-enable crtc memory access
	 */
	for (i = 0; i < 2; i++) {
		if (!(crtc_mask & (1 << i)))
			continue;
		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
		nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
		nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i]);
	}

	/* resume engines */
resume:
	nv_wr32(dev, 0x003250, 0x00000001);
	nv_mask(dev, 0x003220, 0x00000001, 0x00000001);
	nv_wr32(dev, 0x003200, 0x00000001);
	nv_wr32(dev, 0x002500, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	kfree(info);
}

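/*
 * NV40 fan control uses the PWM source at 0x0010f0: bit 31 enables the PWM,
 * bits 30:16 hold the duty value and bits 14:0 the divider.  The duty value
 * counts "off" time, so the fan speed in percent is
 * (divs - duty) * 100 / divs.  Illustrative example (made-up values):
 * divs = 528 and duty = 132 reads back as (528 - 132) * 100 / 528 = 75%.
 */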
int
nv40_pm_fanspeed_get(struct drm_device *dev)
{
	u32 reg = nv_rd32(dev, 0x0010f0);
	if (reg & 0x80000000) {
		u32 duty = (reg & 0x7fff0000) >> 16;
		u32 divs = (reg & 0x00007fff);
		if (divs && divs >= duty)
			return ((divs - duty) * 100) / divs;
	}

	return 100;
}

int
nv40_pm_fanspeed_set(struct drm_device *dev, int percent)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	u32 divs = pm->pwm_divisor;
	u32 duty = ((100 - percent) * divs) / 100;

	nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
	return 0;
}

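/*
 * NV41-style fan control uses a different register pair (0x0015f4 for the
 * enable bit and duty value, 0x0015f8 for the divider) but follows the same
 * "duty counts off time" convention as the NV40 functions above.
 */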
int
nv41_pm_fanspeed_get(struct drm_device *dev)
{
	u32 reg = nv_rd32(dev, 0x0015f4);
	if (reg & 0x80000000) {
		u32 divs = nv_rd32(dev, 0x0015f8);
		u32 duty = (reg & 0x7fffffff);
		if (divs && divs >= duty)
			return ((divs - duty) * 100) / divs;
	}

	return 100;
}

int
nv41_pm_fanspeed_set(struct drm_device *dev, int percent)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	u32 divs = pm->pwm_divisor;
	u32 duty = ((100 - percent) * divs) / 100;

	nv_wr32(dev, 0x0015f8, divs);
	nv_wr32(dev, 0x0015f4, duty | 0x80000000);
	return 0;
}