/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_pm.h"
#include "nouveau_hw.h"

#define min2(a,b) ((a) < (b) ? (a) : (b))

33 static u32
read_pll_1(struct drm_device * dev,u32 reg)34 read_pll_1(struct drm_device *dev, u32 reg)
35 {
36 	u32 ctrl = nv_rd32(dev, reg + 0x00);
37 	int P = (ctrl & 0x00070000) >> 16;
38 	int N = (ctrl & 0x0000ff00) >> 8;
39 	int M = (ctrl & 0x000000ff) >> 0;
40 	u32 ref = 27000, clk = 0;
41 
42 	if (ctrl & 0x80000000)
43 		clk = ref * N / M;
44 
45 	return clk >> P;
46 }
47 
48 static u32
read_pll_2(struct drm_device * dev,u32 reg)49 read_pll_2(struct drm_device *dev, u32 reg)
50 {
51 	u32 ctrl = nv_rd32(dev, reg + 0x00);
52 	u32 coef = nv_rd32(dev, reg + 0x04);
53 	int N2 = (coef & 0xff000000) >> 24;
54 	int M2 = (coef & 0x00ff0000) >> 16;
55 	int N1 = (coef & 0x0000ff00) >> 8;
56 	int M1 = (coef & 0x000000ff) >> 0;
57 	int P = (ctrl & 0x00070000) >> 16;
58 	u32 ref = 27000, clk = 0;
59 
60 	if ((ctrl & 0x80000000) && M1) {
61 		clk = ref * N1 / M1;
62 		if ((ctrl & 0x40000100) == 0x40000000) {
63 			if (M2)
64 				clk = clk * N2 / M2;
65 			else
66 				clk = 0;
67 		}
68 	}
69 
70 	return clk >> P;
71 }
72 
73 static u32
read_clk(struct drm_device * dev,u32 src)74 read_clk(struct drm_device *dev, u32 src)
75 {
76 	switch (src) {
77 	case 3:
78 		return read_pll_2(dev, 0x004000);
79 	case 2:
80 		return read_pll_1(dev, 0x004008);
81 	default:
82 		break;
83 	}
84 
85 	return 0;
86 }
87 
88 int
nv40_pm_clocks_get(struct drm_device * dev,struct nouveau_pm_level * perflvl)89 nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
90 {
91 	u32 ctrl = nv_rd32(dev, 0x00c040);
92 
93 	perflvl->core   = read_clk(dev, (ctrl & 0x00000003) >> 0);
94 	perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
95 	perflvl->memory = read_pll_2(dev, 0x4020);
96 	return 0;
97 }
98 
/* Pre-computed register values for a pending reclock.  Built by
 * nv40_pm_clocks_pre(), consumed (and kfree'd) by nv40_pm_clocks_set().
 */
struct nv40_pm_state {
	u32 ctrl;	/* clock source select bits for 0x00c040 */
	u32 npll_ctrl;	/* core PLL control word (0x004000) */
	u32 npll_coef;	/* core PLL N/M coefficients (0x004004) */
	u32 spll;	/* shader PLL ctrl+coef word (0x004008); 0 = reuse core PLL */
	u32 mpll_ctrl;	/* memory PLL control word; 0 = skip memory reclock */
	u32 mpll_coef;	/* memory PLL N/M coefficients (valid only if mpll_ctrl) */
};
107 
108 static int
nv40_calc_pll(struct drm_device * dev,u32 reg,struct pll_lims * pll,u32 clk,int * N1,int * M1,int * N2,int * M2,int * log2P)109 nv40_calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
110 	      u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
111 {
112 	struct nouveau_pll_vals coef;
113 	int ret;
114 
115 	ret = get_pll_limits(dev, reg, pll);
116 	if (ret)
117 		return ret;
118 
119 	if (clk < pll->vco1.maxfreq)
120 		pll->vco2.maxfreq = 0;
121 
122 	ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
123 	if (ret == 0)
124 		return -ERANGE;
125 
126 	*N1 = coef.N1;
127 	*M1 = coef.M1;
128 	if (N2 && M2) {
129 		if (pll->vco2.maxfreq) {
130 			*N2 = coef.N2;
131 			*M2 = coef.M2;
132 		} else {
133 			*N2 = 1;
134 			*M2 = 1;
135 		}
136 	}
137 	*log2P = coef.log2P;
138 	return 0;
139 }
140 
141 void *
nv40_pm_clocks_pre(struct drm_device * dev,struct nouveau_pm_level * perflvl)142 nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
143 {
144 	struct nv40_pm_state *info;
145 	struct pll_lims pll;
146 	int N1, N2, M1, M2, log2P;
147 	int ret;
148 
149 	info = kmalloc(sizeof(*info), GFP_KERNEL);
150 	if (!info)
151 		return ERR_PTR(-ENOMEM);
152 
153 	/* core/geometric clock */
154 	ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
155 			    &N1, &M1, &N2, &M2, &log2P);
156 	if (ret < 0)
157 		goto out;
158 
159 	if (N2 == M2) {
160 		info->npll_ctrl = 0x80000100 | (log2P << 16);
161 		info->npll_coef = (N1 << 8) | M1;
162 	} else {
163 		info->npll_ctrl = 0xc0000000 | (log2P << 16);
164 		info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
165 	}
166 
167 	/* use the second PLL for shader/rop clock, if it differs from core */
168 	if (perflvl->shader && perflvl->shader != perflvl->core) {
169 		ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
170 				    &N1, &M1, NULL, NULL, &log2P);
171 		if (ret < 0)
172 			goto out;
173 
174 		info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
175 		info->ctrl = 0x00000223;
176 	} else {
177 		info->spll = 0x00000000;
178 		info->ctrl = 0x00000333;
179 	}
180 
181 	/* memory clock */
182 	if (!perflvl->memory) {
183 		info->mpll_ctrl = 0x00000000;
184 		goto out;
185 	}
186 
187 	ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
188 			    &N1, &M1, &N2, &M2, &log2P);
189 	if (ret < 0)
190 		goto out;
191 
192 	info->mpll_ctrl  = 0x80000000 | (log2P << 16);
193 	info->mpll_ctrl |= min2(pll.log2p_bias + log2P, pll.max_log2p) << 20;
194 	if (N2 == M2) {
195 		info->mpll_ctrl |= 0x00000100;
196 		info->mpll_coef  = (N1 << 8) | M1;
197 	} else {
198 		info->mpll_ctrl |= 0x40000000;
199 		info->mpll_coef  = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
200 	}
201 
202 out:
203 	if (ret < 0) {
204 		kfree(info);
205 		info = ERR_PTR(ret);
206 	}
207 	return info;
208 }
209 
210 static bool
nv40_pm_gr_idle(void * data)211 nv40_pm_gr_idle(void *data)
212 {
213 	struct drm_device *dev = data;
214 
215 	if ((nv_rd32(dev, 0x400760) & 0x000000f0) >> 4 !=
216 	    (nv_rd32(dev, 0x400760) & 0x0000000f))
217 		return false;
218 
219 	if (nv_rd32(dev, 0x400700))
220 		return false;
221 
222 	return true;
223 }
224 
/* Apply the clocks pre-computed by nv40_pm_clocks_pre().
 *
 * Halts PFIFO/PGRAPH, reprograms the core/shader PLLs, then (if a
 * memory reclock was requested) puts the RAM into self-refresh during
 * a CRTC vblank window, switches the memory PLL(s), and replays the
 * VBIOS memory-reset script.  @pre_state is consumed and freed here.
 *
 * Returns 0 on success, -EAGAIN if the engines would not idle (clocks
 * are then left untouched).
 */
int
nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv40_pm_state *info = pre_state;
	unsigned long flags;
	struct bit_entry M;
	u32 crtc_mask = 0;
	u8 sr1[2];
	int i, ret = -EAGAIN;

	/* determine which CRTCs are active, fetch VGA_SR1 for each */
	for (i = 0; i < 2; i++) {
		u32 vbl = nv_rd32(dev, 0x600808 + (i * 0x2000));
		u32 cnt = 0;
		do {
			/* vblank counter moving means the CRTC is scanning out */
			if (vbl != nv_rd32(dev, 0x600808 + (i * 0x2000))) {
				nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
				sr1[i] = nv_rd08(dev, 0x0c03c5 + (i * 0x2000));
				/* SR1 bit 5 set = screen already blanked */
				if (!(sr1[i] & 0x20))
					crtc_mask |= (1 << i);
				break;
			}
			udelay(1);
		} while (cnt++ < 32);
	}

	/* halt and idle engines */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
	if (!nv_wait(dev, 0x002500, 0x00000010, 0x00000000))
		goto resume;
	nv_mask(dev, 0x003220, 0x00000001, 0x00000000);
	if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000))
		goto resume;
	nv_mask(dev, 0x003200, 0x00000001, 0x00000000);
	nv04_fifo_cache_pull(dev, false);

	if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
		goto resume;

	/* engines are idle - from here on the reclock goes ahead */
	ret = 0;

	/* set engine clocks */
	nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
	nv_wr32(dev, 0x004004, info->npll_coef);
	nv_mask(dev, 0x004000, 0xc0070100, info->npll_ctrl);
	nv_mask(dev, 0x004008, 0xc007ffff, info->spll);
	mdelay(5);	/* let the PLLs lock before switching sources back */
	nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);

	if (!info->mpll_ctrl)
		goto resume;

	/* wait for vblank start on active crtcs, disable memory access */
	for (i = 0; i < 2; i++) {
		if (!(crtc_mask & (1 << i)))
			continue;
		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
		nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
		nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
	}

	/* prepare ram for reclocking */
	nv_wr32(dev, 0x1002d4, 0x00000001); /* precharge */
	nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
	nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
	nv_mask(dev, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
	nv_wr32(dev, 0x1002dc, 0x00000001); /* enable self-refresh */

	/* change the PLL of each memory partition */
	nv_mask(dev, 0x00c040, 0x0000c000, 0x00000000);
	/* NOTE: deliberate fallthrough - later chipsets have fewer
	 * partition PLLs, earlier ones program all of them */
	switch (dev_priv->chipset) {
	case 0x40:
	case 0x45:
	case 0x41:
	case 0x42:
	case 0x47:
		nv_mask(dev, 0x004044, 0xc0771100, info->mpll_ctrl);
		nv_mask(dev, 0x00402c, 0xc0771100, info->mpll_ctrl);
		nv_wr32(dev, 0x004048, info->mpll_coef);
		nv_wr32(dev, 0x004030, info->mpll_coef);
		/* fallthrough */
	case 0x43:
	case 0x49:
	case 0x4b:
		nv_mask(dev, 0x004038, 0xc0771100, info->mpll_ctrl);
		nv_wr32(dev, 0x00403c, info->mpll_coef);
		/* fallthrough */
	default:
		nv_mask(dev, 0x004020, 0xc0771100, info->mpll_ctrl);
		nv_wr32(dev, 0x004024, info->mpll_coef);
		break;
	}
	udelay(100);	/* PLL lock time */
	nv_mask(dev, 0x00c040, 0x0000c000, 0x0000c000);

	/* re-enable normal operation of memory controller */
	nv_wr32(dev, 0x1002dc, 0x00000000);
	nv_mask(dev, 0x100210, 0x80000000, 0x80000000);
	udelay(100);

	/* execute memory reset script from vbios */
	if (!bit_table(dev, 'M', &M))
		nouveau_bios_init_exec(dev, ROM16(M.data[0]));

	/* make sure we're in vblank (hopefully the same one as before), and
	 * then re-enable crtc memory access
	 */
	for (i = 0; i < 2; i++) {
		if (!(crtc_mask & (1 << i)))
			continue;
		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
		nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
		nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i]);
	}

	/* resume engines */
resume:
	nv_wr32(dev, 0x003250, 0x00000001);
	nv_mask(dev, 0x003220, 0x00000001, 0x00000001);
	nv_wr32(dev, 0x003200, 0x00000001);
	nv_wr32(dev, 0x002500, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	kfree(info);
	return ret;
}
352 
353 int
nv40_pm_pwm_get(struct drm_device * dev,int line,u32 * divs,u32 * duty)354 nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
355 {
356 	if (line == 2) {
357 		u32 reg = nv_rd32(dev, 0x0010f0);
358 		if (reg & 0x80000000) {
359 			*duty = (reg & 0x7fff0000) >> 16;
360 			*divs = (reg & 0x00007fff);
361 			return 0;
362 		}
363 	} else
364 	if (line == 9) {
365 		u32 reg = nv_rd32(dev, 0x0015f4);
366 		if (reg & 0x80000000) {
367 			*divs = nv_rd32(dev, 0x0015f8);
368 			*duty = (reg & 0x7fffffff);
369 			return 0;
370 		}
371 	} else {
372 		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
373 		return -ENODEV;
374 	}
375 
376 	return -EINVAL;
377 }
378 
379 int
nv40_pm_pwm_set(struct drm_device * dev,int line,u32 divs,u32 duty)380 nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
381 {
382 	if (line == 2) {
383 		nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
384 	} else
385 	if (line == 9) {
386 		nv_wr32(dev, 0x0015f8, divs);
387 		nv_wr32(dev, 0x0015f4, duty | 0x80000000);
388 	} else {
389 		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
390 		return -ENODEV;
391 	}
392 
393 	return 0;
394 }
395