1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  */
26 
27 #include <linux/cpufreq.h>
28 #include <linux/dmi.h>
29 #include <linux/module.h>
30 #include <linux/input.h>
31 #include <linux/i2c.h>
32 #include <linux/kernel.h>
33 #include <linux/slab.h>
34 #include <linux/vgaarb.h>
35 #include <drm/drm_edid.h>
36 #include "drmP.h"
37 #include "intel_drv.h"
38 #include "i915_drm.h"
39 #include "i915_drv.h"
40 #include "i915_trace.h"
41 #include "drm_dp_helper.h"
42 #include "drm_crtc_helper.h"
43 #include <linux/dma_remapping.h>
44 
45 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
46 
47 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
48 static void intel_update_watermarks(struct drm_device *dev);
49 static void intel_increase_pllclock(struct drm_crtc *crtc);
50 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
51 
/* PLL divider state: callers fill in the "given" dividers and
 * intel_clock()/pineview_clock() compute the derived fields. */
typedef struct {
	/* given values */
	int n;			/* reference divider */
	int m1, m2;		/* feedback multiplier stages */
	int p1, p2;		/* post dividers */
	/* derived values */
	int	dot;		/* resulting dot clock (kHz, like mode->clock) */
	int	vco;		/* VCO frequency before post division */
	int	m;		/* effective multiplier built from m1/m2 */
	int	p;		/* effective post divider, p1 * p2 */
} intel_clock_t;
63 
/* Inclusive [min, max] bounds for a single PLL divider. */
typedef struct {
	int	min, max;
} intel_range_t;

/* p2 selection rule: targets below dot_limit use p2_slow, others p2_fast
 * (see the find_pll implementations below). */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;
/* Per-platform/per-output PLL divider limits plus the search routine
 * used to pick dividers for a target dot clock. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	/* Search for dividers hitting the target clock within these
	 * limits; returns false when nothing suitable is found. */
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			int, int, intel_clock_t *, intel_clock_t *);
};
81 
82 /* FDI */
83 #define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
84 
85 static bool
86 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
87 		    int target, int refclk, intel_clock_t *match_clock,
88 		    intel_clock_t *best_clock);
89 static bool
90 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
91 			int target, int refclk, intel_clock_t *match_clock,
92 			intel_clock_t *best_clock);
93 
94 static bool
95 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
96 		      int target, int refclk, intel_clock_t *match_clock,
97 		      intel_clock_t *best_clock);
98 static bool
99 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
100 			   int target, int refclk, intel_clock_t *match_clock,
101 			   intel_clock_t *best_clock);
102 
103 static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device * dev)104 intel_fdi_link_freq(struct drm_device *dev)
105 {
106 	if (IS_GEN5(dev)) {
107 		struct drm_i915_private *dev_priv = dev->dev_private;
108 		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
109 	} else
110 		return 27;
111 }
112 
/* Gen2 (i8xx) divider limits for DVO outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

/* Gen2 (i8xx) divider limits for LVDS panels. */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3/4 (i9xx) divider limits for SDVO and other non-LVDS outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3/4 (i9xx) divider limits for LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
168 
169 
/* G4x divider limits for SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x divider limits for HDMI and analog (CRT) outputs. */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x divider limits for single-channel LVDS panels. */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x divider limits for dual-channel LVDS panels. */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x divider limits for DisplayPort (fixed-link-rate search). */
static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};
243 
/* Pineview divider limits for SDVO outputs. */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Pineview divider limits for LVDS panels (m1 unused here too). */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
273 
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
/* Ironlake divider limits for the CRT DAC. */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake divider limits for single-channel LVDS (120MHz refclk). */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake divider limits for dual-channel LVDS (120MHz refclk). */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake divider limits for DisplayPort (fixed-link-rate search). */
static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};
363 
/*
 * Report whether the LVDS port selected by @reg is operating in
 * dual-channel (clock B powered) mode.  The probed register value is
 * cached in dev_priv->lvds_val so subsequent calls avoid re-reading.
 */
static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
			      unsigned int reg)
{
	unsigned int val;

	if (dev_priv->lvds_val)
		val = dev_priv->lvds_val;
	else {
		/* BIOS should set the proper LVDS register value at boot, but
		 * in reality, it doesn't set the value when the lid is closed;
		 * we need to check "the value to be set" in VBT when LVDS
		 * register is uninitialized.
		 */
		val = I915_READ(reg);
		if (!(val & ~LVDS_DETECTED))
			val = dev_priv->bios_lvds_val;
		dev_priv->lvds_val = val;
	}
	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
384 
intel_ironlake_limit(struct drm_crtc * crtc,int refclk)385 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
386 						int refclk)
387 {
388 	struct drm_device *dev = crtc->dev;
389 	struct drm_i915_private *dev_priv = dev->dev_private;
390 	const intel_limit_t *limit;
391 
392 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
393 		if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
394 			/* LVDS dual channel */
395 			if (refclk == 100000)
396 				limit = &intel_limits_ironlake_dual_lvds_100m;
397 			else
398 				limit = &intel_limits_ironlake_dual_lvds;
399 		} else {
400 			if (refclk == 100000)
401 				limit = &intel_limits_ironlake_single_lvds_100m;
402 			else
403 				limit = &intel_limits_ironlake_single_lvds;
404 		}
405 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
406 			HAS_eDP)
407 		limit = &intel_limits_ironlake_display_port;
408 	else
409 		limit = &intel_limits_ironlake_dac;
410 
411 	return limit;
412 }
413 
intel_g4x_limit(struct drm_crtc * crtc)414 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
415 {
416 	struct drm_device *dev = crtc->dev;
417 	struct drm_i915_private *dev_priv = dev->dev_private;
418 	const intel_limit_t *limit;
419 
420 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
421 		if (is_dual_link_lvds(dev_priv, LVDS))
422 			/* LVDS with dual channel */
423 			limit = &intel_limits_g4x_dual_channel_lvds;
424 		else
425 			/* LVDS with dual channel */
426 			limit = &intel_limits_g4x_single_channel_lvds;
427 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
428 		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
429 		limit = &intel_limits_g4x_hdmi;
430 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
431 		limit = &intel_limits_g4x_sdvo;
432 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
433 		limit = &intel_limits_g4x_display_port;
434 	} else /* The option is for other outputs */
435 		limit = &intel_limits_i9xx_sdvo;
436 
437 	return limit;
438 }
439 
intel_limit(struct drm_crtc * crtc,int refclk)440 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
441 {
442 	struct drm_device *dev = crtc->dev;
443 	const intel_limit_t *limit;
444 
445 	if (HAS_PCH_SPLIT(dev))
446 		limit = intel_ironlake_limit(crtc, refclk);
447 	else if (IS_G4X(dev)) {
448 		limit = intel_g4x_limit(crtc);
449 	} else if (IS_PINEVIEW(dev)) {
450 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
451 			limit = &intel_limits_pineview_lvds;
452 		else
453 			limit = &intel_limits_pineview_sdvo;
454 	} else if (!IS_GEN2(dev)) {
455 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
456 			limit = &intel_limits_i9xx_lvds;
457 		else
458 			limit = &intel_limits_i9xx_sdvo;
459 	} else {
460 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
461 			limit = &intel_limits_i8xx_lvds;
462 		else
463 			limit = &intel_limits_i8xx_dvo;
464 	}
465 	return limit;
466 }
467 
468 /* m1 is reserved as 0 in Pineview, n is a ring counter */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	int m = clock->m2 + 2;
	int p = clock->p1 * clock->p2;

	clock->m = m;
	clock->p = p;
	clock->vco = refclk * m / clock->n;
	clock->dot = clock->vco / p;
}
476 
/* Fill in the derived m/p/vco/dot fields from the given dividers.
 * N/M1/M2 are register values, so the actual divider is value + 2. */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		/* Pineview uses a single combined m divider and a ring-counter n. */
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
488 
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* Walk all encoders and match those currently bound to this crtc. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}
504 
/* Bail out of the enclosing validity check; the debug print is compiled out. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* m1 is unused (reserved as 0) on Pineview, so skip the ordering check. */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
539 
/*
 * Exhaustively search the divider space described by @limit for the
 * combination whose resulting dot clock is closest to @target.
 * The winner is written to @best_clock; returns true if any valid
 * combination was found.
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;	/* best error so far, seeded at worst case */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if (is_dual_link_lvds(dev_priv, LVDS))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* p2 is fixed by the target clock: slow below the limit. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					/* Derive m/p/vco/dot for this combo. */
					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* We found something iff we improved on the initial error. */
	return (err != target);
}
605 
/*
 * Divider search used on G4x and PCH platforms.  Prefers smaller n
 * values and only accepts combinations within roughly 0.586% of
 * @target; the closest match is written to @best_clock.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		/* Follow the panel's current single/dual channel state. */
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* cap n: larger values can't
						 * beat the current best */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
673 
674 static bool
intel_find_pll_ironlake_dp(const intel_limit_t * limit,struct drm_crtc * crtc,int target,int refclk,intel_clock_t * match_clock,intel_clock_t * best_clock)675 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
676 			   int target, int refclk, intel_clock_t *match_clock,
677 			   intel_clock_t *best_clock)
678 {
679 	struct drm_device *dev = crtc->dev;
680 	intel_clock_t clock;
681 
682 	if (target < 200000) {
683 		clock.n = 1;
684 		clock.p1 = 2;
685 		clock.p2 = 10;
686 		clock.m1 = 12;
687 		clock.m2 = 9;
688 	} else {
689 		clock.n = 2;
690 		clock.p1 = 1;
691 		clock.p2 = 10;
692 		clock.m1 = 14;
693 		clock.m2 = 8;
694 	}
695 	intel_clock(dev, refclk, &clock);
696 	memcpy(best_clock, &clock, sizeof(intel_clock_t));
697 	return true;
698 }
699 
700 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
701 static bool
intel_find_pll_g4x_dp(const intel_limit_t * limit,struct drm_crtc * crtc,int target,int refclk,intel_clock_t * match_clock,intel_clock_t * best_clock)702 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
703 		      int target, int refclk, intel_clock_t *match_clock,
704 		      intel_clock_t *best_clock)
705 {
706 	intel_clock_t clock;
707 	if (target < 200000) {
708 		clock.p1 = 2;
709 		clock.p2 = 10;
710 		clock.n = 2;
711 		clock.m1 = 23;
712 		clock.m2 = 8;
713 	} else {
714 		clock.p1 = 1;
715 		clock.p2 = 10;
716 		clock.n = 1;
717 		clock.m1 = 14;
718 		clock.m2 = 2;
719 	}
720 	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
721 	clock.p = (clock.p1 * clock.p2);
722 	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
723 	clock.vco = 0;
724 	memcpy(best_clock, &clock, sizeof(intel_clock_t));
725 	return true;
726 }
727 
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set, up to 50ms */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
763 
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off (up to 100ms) */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle: poll every 5ms until
		 * two consecutive reads match or the 100ms budget expires. */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
807 
/* Render an enable bit as "on"/"off" for the assertion messages below. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
812 
/* Only for pre-ILK configs */
/* Warn if the DPLL for @pipe is not in the expected on/off @state. */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
830 
/* For ILK+ */
/* Warn if the PCH DPLL feeding @pipe is not in the expected @state.
 * On CPT the transcoder->PLL mapping is indirect via PCH_DPLL_SEL. */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		WARN(!((pch_dpll >> (4 * pipe)) & 8),
		     "transcoder %d PLL not enabled\n", pipe);

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
861 
/* Warn if the FDI transmitter for @pipe is not in the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 ctl = I915_READ(FDI_TX_CTL(pipe));
	bool enabled = !!(ctl & FDI_TX_ENABLE);

	WARN(enabled != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(enabled));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
878 
/* Warn if the FDI receiver for @pipe is not in the expected @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 ctl = I915_READ(FDI_RX_CTL(pipe));
	bool enabled = !!(ctl & FDI_RX_ENABLE);

	WARN(enabled != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(enabled));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
895 
assert_fdi_tx_pll_enabled(struct drm_i915_private * dev_priv,enum pipe pipe)896 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
897 				      enum pipe pipe)
898 {
899 	int reg;
900 	u32 val;
901 
902 	/* ILK FDI PLL is always enabled */
903 	if (dev_priv->info->gen == 5)
904 		return;
905 
906 	reg = FDI_TX_CTL(pipe);
907 	val = I915_READ(reg);
908 	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
909 }
910 
assert_fdi_rx_pll_enabled(struct drm_i915_private * dev_priv,enum pipe pipe)911 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
912 				      enum pipe pipe)
913 {
914 	int reg;
915 	u32 val;
916 
917 	reg = FDI_RX_CTL(pipe);
918 	val = I915_READ(reg);
919 	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
920 }
921 
assert_panel_unlocked(struct drm_i915_private * dev_priv,enum pipe pipe)922 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
923 				  enum pipe pipe)
924 {
925 	int pp_reg, lvds_reg;
926 	u32 val;
927 	enum pipe panel_pipe = PIPE_A;
928 	bool locked = true;
929 
930 	if (HAS_PCH_SPLIT(dev_priv->dev)) {
931 		pp_reg = PCH_PP_CONTROL;
932 		lvds_reg = PCH_LVDS;
933 	} else {
934 		pp_reg = PP_CONTROL;
935 		lvds_reg = LVDS;
936 	}
937 
938 	val = I915_READ(pp_reg);
939 	if (!(val & PANEL_POWER_ON) ||
940 	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
941 		locked = false;
942 
943 	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
944 		panel_pipe = PIPE_B;
945 
946 	WARN(panel_pipe == pipe && locked,
947 	     "panel assertion failure, pipe %c regs locked\n",
948 	     pipe_name(pipe));
949 }
950 
/* WARN if @pipe's PIPECONF enable bit does not match @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	cur_state = !!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
969 
/* WARN if @plane's DSPCNTR enable bit does not match @state. */
static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	bool cur_state = !!(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);

	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
987 
assert_planes_disabled(struct drm_i915_private * dev_priv,enum pipe pipe)988 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
989 				   enum pipe pipe)
990 {
991 	int reg, i;
992 	u32 val;
993 	int cur_pipe;
994 
995 	/* Planes are fixed to pipes on ILK+ */
996 	if (HAS_PCH_SPLIT(dev_priv->dev)) {
997 		reg = DSPCNTR(pipe);
998 		val = I915_READ(reg);
999 		WARN((val & DISPLAY_PLANE_ENABLE),
1000 		     "plane %c assertion failure, should be disabled but not\n",
1001 		     plane_name(pipe));
1002 		return;
1003 	}
1004 
1005 	/* Need to check both planes against the pipe */
1006 	for (i = 0; i < 2; i++) {
1007 		reg = DSPCNTR(i);
1008 		val = I915_READ(reg);
1009 		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1010 			DISPPLANE_SEL_PIPE_SHIFT;
1011 		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1012 		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1013 		     plane_name(i), pipe_name(pipe));
1014 	}
1015 }
1016 
assert_pch_refclk_enabled(struct drm_i915_private * dev_priv)1017 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1018 {
1019 	u32 val;
1020 	bool enabled;
1021 
1022 	val = I915_READ(PCH_DREF_CONTROL);
1023 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1024 			    DREF_SUPERSPREAD_SOURCE_MASK));
1025 	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1026 }
1027 
assert_transcoder_disabled(struct drm_i915_private * dev_priv,enum pipe pipe)1028 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1029 				       enum pipe pipe)
1030 {
1031 	int reg;
1032 	u32 val;
1033 	bool enabled;
1034 
1035 	reg = TRANSCONF(pipe);
1036 	val = I915_READ(reg);
1037 	enabled = !!(val & TRANS_ENABLE);
1038 	WARN(enabled,
1039 	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1040 	     pipe_name(pipe));
1041 }
1042 
dp_pipe_enabled(struct drm_i915_private * dev_priv,enum pipe pipe,u32 port_sel,u32 val)1043 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1044 			    enum pipe pipe, u32 port_sel, u32 val)
1045 {
1046 	if ((val & DP_PORT_EN) == 0)
1047 		return false;
1048 
1049 	if (HAS_PCH_CPT(dev_priv->dev)) {
1050 		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1051 		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1052 		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1053 			return false;
1054 	} else {
1055 		if ((val & DP_PIPE_MASK) != (pipe << 30))
1056 			return false;
1057 	}
1058 	return true;
1059 }
1060 
hdmi_pipe_enabled(struct drm_i915_private * dev_priv,enum pipe pipe,u32 val)1061 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1062 			      enum pipe pipe, u32 val)
1063 {
1064 	if ((val & PORT_ENABLE) == 0)
1065 		return false;
1066 
1067 	if (HAS_PCH_CPT(dev_priv->dev)) {
1068 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1069 			return false;
1070 	} else {
1071 		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1072 			return false;
1073 	}
1074 	return true;
1075 }
1076 
lvds_pipe_enabled(struct drm_i915_private * dev_priv,enum pipe pipe,u32 val)1077 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1078 			      enum pipe pipe, u32 val)
1079 {
1080 	if ((val & LVDS_PORT_EN) == 0)
1081 		return false;
1082 
1083 	if (HAS_PCH_CPT(dev_priv->dev)) {
1084 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1085 			return false;
1086 	} else {
1087 		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1088 			return false;
1089 	}
1090 	return true;
1091 }
1092 
adpa_pipe_enabled(struct drm_i915_private * dev_priv,enum pipe pipe,u32 val)1093 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1094 			      enum pipe pipe, u32 val)
1095 {
1096 	if ((val & ADPA_DAC_ENABLE) == 0)
1097 		return false;
1098 	if (HAS_PCH_CPT(dev_priv->dev)) {
1099 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1100 			return false;
1101 	} else {
1102 		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1103 			return false;
1104 	}
1105 	return true;
1106 }
1107 
/* WARN if the PCH DP port at @reg is still driving transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, I915_READ(reg)),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1116 
/* WARN if the PCH HDMI port at @reg is still driving transcoder @pipe. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	WARN(hdmi_pipe_enabled(dev_priv, pipe, I915_READ(reg)),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1125 
assert_pch_ports_disabled(struct drm_i915_private * dev_priv,enum pipe pipe)1126 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1127 				      enum pipe pipe)
1128 {
1129 	int reg;
1130 	u32 val;
1131 
1132 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1133 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1134 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1135 
1136 	reg = PCH_ADPA;
1137 	val = I915_READ(reg);
1138 	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1139 	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1140 	     pipe_name(pipe));
1141 
1142 	reg = PCH_LVDS;
1143 	val = I915_READ(reg);
1144 	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1145 	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1146 	     pipe_name(pipe));
1147 
1148 	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1149 	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1150 	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1151 }
1152 
1153 /**
1154  * intel_enable_pll - enable a PLL
1155  * @dev_priv: i915 private structure
1156  * @pipe: pipe PLL to enable
1157  *
1158  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1159  * make sure the PLL reg is writable first though, since the panel write
1160  * protect mechanism may be enabled.
1161  *
1162  * Note!  This is for pre-ILK only.
1163  */
intel_enable_pll(struct drm_i915_private * dev_priv,enum pipe pipe)1164 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1165 {
1166 	int reg;
1167 	u32 val;
1168 
1169 	/* No really, not for ILK+ */
1170 	BUG_ON(dev_priv->info->gen >= 5);
1171 
1172 	/* PLL is protected by panel, make sure we can write it */
1173 	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1174 		assert_panel_unlocked(dev_priv, pipe);
1175 
1176 	reg = DPLL(pipe);
1177 	val = I915_READ(reg);
1178 	val |= DPLL_VCO_ENABLE;
1179 
1180 	/* We do this three times for luck */
1181 	I915_WRITE(reg, val);
1182 	POSTING_READ(reg);
1183 	udelay(150); /* wait for warmup */
1184 	I915_WRITE(reg, val);
1185 	POSTING_READ(reg);
1186 	udelay(150); /* wait for warmup */
1187 	I915_WRITE(reg, val);
1188 	POSTING_READ(reg);
1189 	udelay(150); /* wait for warmup */
1190 }
1191 
1192 /**
1193  * intel_disable_pll - disable a PLL
1194  * @dev_priv: i915 private structure
1195  * @pipe: pipe PLL to disable
1196  *
1197  * Disable the PLL for @pipe, making sure the pipe is off first.
1198  *
1199  * Note!  This is for pre-ILK only.
1200  */
intel_disable_pll(struct drm_i915_private * dev_priv,enum pipe pipe)1201 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1202 {
1203 	int reg;
1204 	u32 val;
1205 
1206 	/* Don't disable pipe A or pipe A PLLs if needed */
1207 	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1208 		return;
1209 
1210 	/* Make sure the pipe isn't still relying on us */
1211 	assert_pipe_disabled(dev_priv, pipe);
1212 
1213 	reg = DPLL(pipe);
1214 	val = I915_READ(reg);
1215 	val &= ~DPLL_VCO_ENABLE;
1216 	I915_WRITE(reg, val);
1217 	POSTING_READ(reg);
1218 }
1219 
1220 /**
1221  * intel_enable_pch_pll - enable PCH PLL
1222  * @dev_priv: i915 private structure
1223  * @pipe: pipe PLL to enable
1224  *
1225  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1226  * drives the transcoder clock.
1227  */
intel_enable_pch_pll(struct drm_i915_private * dev_priv,enum pipe pipe)1228 static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1229 				 enum pipe pipe)
1230 {
1231 	int reg;
1232 	u32 val;
1233 
1234 	if (pipe > 1)
1235 		return;
1236 
1237 	/* PCH only available on ILK+ */
1238 	BUG_ON(dev_priv->info->gen < 5);
1239 
1240 	/* PCH refclock must be enabled first */
1241 	assert_pch_refclk_enabled(dev_priv);
1242 
1243 	reg = PCH_DPLL(pipe);
1244 	val = I915_READ(reg);
1245 	val |= DPLL_VCO_ENABLE;
1246 	I915_WRITE(reg, val);
1247 	POSTING_READ(reg);
1248 	udelay(200);
1249 }
1250 
/*
 * Disable the PCH PLL for @pipe, unless transcoder C is still clocked
 * from it (PCH_DPLL_SEL routing), in which case the PLL must stay on.
 */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
		pll_sel = TRANSC_DPLL_ENABLE;

	/* Only PCH PLLs A/B exist; nothing to do for higher pipes */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	/* Build the PCH_DPLL_SEL pattern that would mean "transcoder C is
	 * enabled and sourced from this pipe's PLL". */
	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;


	/* If transcoder C still uses this PLL, leave it running */
	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	/* allow the PLL to wind down — TODO confirm 200us against the docs */
	udelay(200);
}
1283 
/*
 * Enable the PCH transcoder for @pipe.  Requires the PCH DPLL and both
 * FDI TX/RX to already be enabled; copies BPC and interlace settings
 * from the CPU PIPECONF into TRANSCONF before setting TRANS_ENABLE.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int reg;
	u32 val, pipeconf_val;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= pipeconf_val & PIPE_BPC_MASK;
	}

	/* Mirror the pipe's interlace mode.  Note the brace-less nesting:
	 * the first "else" binds to the inner if (IBX+SDVO check), the
	 * last one to the outer interlaced-vs-progressive test. */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	/* wait up to 100ms for the hardware to report the transcoder on */
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
1328 
/*
 * Disable the PCH transcoder for @pipe.  FDI and all PCH ports must be
 * off first, since they depend on the transcoder clock.
 */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}
1350 
1351 /**
1352  * intel_enable_pipe - enable a pipe, asserting requirements
1353  * @dev_priv: i915 private structure
1354  * @pipe: pipe to enable
1355  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1356  *
1357  * Enable @pipe, making sure that various hardware specific requirements
1358  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1359  *
1360  * @pipe should be %PIPE_A or %PIPE_B.
1361  *
1362  * Will wait until the pipe is actually running (i.e. first vblank) before
1363  * returning.
1364  */
intel_enable_pipe(struct drm_i915_private * dev_priv,enum pipe pipe,bool pch_port)1365 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1366 			      bool pch_port)
1367 {
1368 	int reg;
1369 	u32 val;
1370 
1371 	/*
1372 	 * A pipe without a PLL won't actually be able to drive bits from
1373 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1374 	 * need the check.
1375 	 */
1376 	if (!HAS_PCH_SPLIT(dev_priv->dev))
1377 		assert_pll_enabled(dev_priv, pipe);
1378 	else {
1379 		if (pch_port) {
1380 			/* if driving the PCH, we need FDI enabled */
1381 			assert_fdi_rx_pll_enabled(dev_priv, pipe);
1382 			assert_fdi_tx_pll_enabled(dev_priv, pipe);
1383 		}
1384 		/* FIXME: assert CPU port conditions for SNB+ */
1385 	}
1386 
1387 	reg = PIPECONF(pipe);
1388 	val = I915_READ(reg);
1389 	if (val & PIPECONF_ENABLE)
1390 		return;
1391 
1392 	I915_WRITE(reg, val | PIPECONF_ENABLE);
1393 	intel_wait_for_vblank(dev_priv->dev, pipe);
1394 }
1395 
1396 /**
1397  * intel_disable_pipe - disable a pipe, asserting requirements
1398  * @dev_priv: i915 private structure
1399  * @pipe: pipe to disable
1400  *
1401  * Disable @pipe, making sure that various hardware specific requirements
1402  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1403  *
1404  * @pipe should be %PIPE_A or %PIPE_B.
1405  *
1406  * Will wait until the pipe has shut down before returning.
1407  */
intel_disable_pipe(struct drm_i915_private * dev_priv,enum pipe pipe)1408 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1409 			       enum pipe pipe)
1410 {
1411 	int reg;
1412 	u32 val;
1413 
1414 	/*
1415 	 * Make sure planes won't keep trying to pump pixels to us,
1416 	 * or we might hang the display.
1417 	 */
1418 	assert_planes_disabled(dev_priv, pipe);
1419 
1420 	/* Don't disable pipe A or pipe A PLLs if needed */
1421 	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1422 		return;
1423 
1424 	reg = PIPECONF(pipe);
1425 	val = I915_READ(reg);
1426 	if ((val & PIPECONF_ENABLE) == 0)
1427 		return;
1428 
1429 	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1430 	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1431 }
1432 
1433 /*
1434  * Plane regs are double buffered, going from enabled->disabled needs a
1435  * trigger in order to latch.  The display address reg provides this.
1436  */
intel_flush_display_plane(struct drm_i915_private * dev_priv,enum plane plane)1437 static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1438 				      enum plane plane)
1439 {
1440 	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1441 	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1442 }
1443 
1444 /**
1445  * intel_enable_plane - enable a display plane on a given pipe
1446  * @dev_priv: i915 private structure
1447  * @plane: plane to enable
1448  * @pipe: pipe being fed
1449  *
1450  * Enable @plane on @pipe, making sure that @pipe is running first.
1451  */
intel_enable_plane(struct drm_i915_private * dev_priv,enum plane plane,enum pipe pipe)1452 static void intel_enable_plane(struct drm_i915_private *dev_priv,
1453 			       enum plane plane, enum pipe pipe)
1454 {
1455 	int reg;
1456 	u32 val;
1457 
1458 	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1459 	assert_pipe_enabled(dev_priv, pipe);
1460 
1461 	reg = DSPCNTR(plane);
1462 	val = I915_READ(reg);
1463 	if (val & DISPLAY_PLANE_ENABLE)
1464 		return;
1465 
1466 	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1467 	intel_flush_display_plane(dev_priv, plane);
1468 	intel_wait_for_vblank(dev_priv->dev, pipe);
1469 }
1470 
1471 /**
1472  * intel_disable_plane - disable a display plane
1473  * @dev_priv: i915 private structure
1474  * @plane: plane to disable
1475  * @pipe: pipe consuming the data
1476  *
1477  * Disable @plane; should be an independent operation.
1478  */
intel_disable_plane(struct drm_i915_private * dev_priv,enum plane plane,enum pipe pipe)1479 static void intel_disable_plane(struct drm_i915_private *dev_priv,
1480 				enum plane plane, enum pipe pipe)
1481 {
1482 	int reg;
1483 	u32 val;
1484 
1485 	reg = DSPCNTR(plane);
1486 	val = I915_READ(reg);
1487 	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1488 		return;
1489 
1490 	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1491 	intel_flush_display_plane(dev_priv, plane);
1492 	intel_wait_for_vblank(dev_priv->dev, pipe);
1493 }
1494 
/* Turn off the PCH DP port at @reg if it is driving transcoder @pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);

	if (!dp_pipe_enabled(dev_priv, pipe, port_sel, val))
		return;

	DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
	I915_WRITE(reg, val & ~DP_PORT_EN);
}
1504 
disable_pch_hdmi(struct drm_i915_private * dev_priv,enum pipe pipe,int reg)1505 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1506 			     enum pipe pipe, int reg)
1507 {
1508 	u32 val = I915_READ(reg);
1509 	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1510 		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1511 			      reg, pipe);
1512 		I915_WRITE(reg, val & ~PORT_ENABLE);
1513 	}
1514 }
1515 
/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 reg, val;

	/* Unlock the panel-protected registers before touching the ports */
	val = I915_READ(PCH_PP_CONTROL);
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);

	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	/* analog (VGA) output */
	reg = PCH_ADPA;
	val = I915_READ(reg);
	if (adpa_pipe_enabled(dev_priv, pipe, val))
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);

	reg = PCH_LVDS;
	val = I915_READ(reg);
	if (lvds_pipe_enabled(dev_priv, pipe, val)) {
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
		POSTING_READ(reg);
		/* give the LVDS pairs time to settle after the disable —
		 * TODO confirm the 100us figure against the panel docs */
		udelay(100);
	}

	disable_pch_hdmi(dev_priv, pipe, HDMIB);
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
	disable_pch_hdmi(dev_priv, pipe, HDMID);
}
1547 
/* Disable 8xx-style framebuffer compression and wait for it to go idle. */
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear (up to 10ms) */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
1569 
/*
 * Enable 8xx-style framebuffer compression for the crtc's current fb.
 * @interval: periodic recompression interval (FBC_CTL units).
 */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	/* Compressed-buffer pitch, clamped to the framebuffer pitch */
	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	/* note: interval mask is 0x2fff here — presumably matches the
	 * register field; TODO confirm against the hardware docs */
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
1612 
i8xx_fbc_enabled(struct drm_device * dev)1613 static bool i8xx_fbc_enabled(struct drm_device *dev)
1614 {
1615 	struct drm_i915_private *dev_priv = dev->dev_private;
1616 
1617 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1618 }
1619 
/*
 * Enable G4x-style (DPFC) framebuffer compression for the crtc's fb.
 * @interval: recompression timer count for DPFC_RECOMP_CTL.
 */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* plane select, self-refresh, 1x compression limit, CPU fence */
	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1646 
g4x_disable_fbc(struct drm_device * dev)1647 static void g4x_disable_fbc(struct drm_device *dev)
1648 {
1649 	struct drm_i915_private *dev_priv = dev->dev_private;
1650 	u32 dpfc_ctl;
1651 
1652 	/* Disable compression */
1653 	dpfc_ctl = I915_READ(DPFC_CONTROL);
1654 	if (dpfc_ctl & DPFC_CTL_EN) {
1655 		dpfc_ctl &= ~DPFC_CTL_EN;
1656 		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1657 
1658 		DRM_DEBUG_KMS("disabled FBC\n");
1659 	}
1660 }
1661 
g4x_fbc_enabled(struct drm_device * dev)1662 static bool g4x_fbc_enabled(struct drm_device *dev)
1663 {
1664 	struct drm_i915_private *dev_priv = dev->dev_private;
1665 
1666 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1667 }
1668 
/*
 * Make the SNB blitter notify FBC of its writes by toggling the
 * FBC_NOTIFY bit in GEN6_BLITTER_ECOSKPD.  The three-write sequence
 * appears to use the register's lock/mask bits at GEN6_BLITTER_LOCK_SHIFT
 * (unlock, set, re-lock) — TODO confirm against the SNB docs.
 */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
1688 
/*
 * Enable ILK/SNB framebuffer compression for the crtc's fb.  On SNB an
 * extra CPU fence register and a blitter notification are programmed.
 * @interval: recompression timer count for ILK_DPFC_RECOMP_CTL.
 */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* keep only the reserved bits, then rebuild the control word */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	/* point the hardware at the render target in the GTT */
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		/* SNB also needs the CPU fence programmed and the blitter
		 * told to notify FBC of its writes */
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1726 
ironlake_disable_fbc(struct drm_device * dev)1727 static void ironlake_disable_fbc(struct drm_device *dev)
1728 {
1729 	struct drm_i915_private *dev_priv = dev->dev_private;
1730 	u32 dpfc_ctl;
1731 
1732 	/* Disable compression */
1733 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1734 	if (dpfc_ctl & DPFC_CTL_EN) {
1735 		dpfc_ctl &= ~DPFC_CTL_EN;
1736 		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1737 
1738 		DRM_DEBUG_KMS("disabled FBC\n");
1739 	}
1740 }
1741 
ironlake_fbc_enabled(struct drm_device * dev)1742 static bool ironlake_fbc_enabled(struct drm_device *dev)
1743 {
1744 	struct drm_i915_private *dev_priv = dev->dev_private;
1745 
1746 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1747 }
1748 
intel_fbc_enabled(struct drm_device * dev)1749 bool intel_fbc_enabled(struct drm_device *dev)
1750 {
1751 	struct drm_i915_private *dev_priv = dev->dev_private;
1752 
1753 	if (!dev_priv->display.fbc_enabled)
1754 		return false;
1755 
1756 	return dev_priv->display.fbc_enabled(dev);
1757 }
1758 
/*
 * Delayed-work body for the deferred FBC enable scheduled by
 * intel_enable_fbc().  Runs under struct_mutex; only acts if this work
 * item is still the one dev_priv->fbc_work points at (i.e. it has not
 * been superseded or cancelled).  Frees itself on exit.
 */
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			/* record what FBC is now compressing */
			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	/* work item is single-use; it owns its own allocation */
	kfree(work);
}
1787 
/*
 * Cancel any pending deferred FBC enable.  Per the comments below this
 * relies on struct_mutex for synchronisation with intel_fbc_work_fn()
 * (presumably the caller holds it — verify at call sites).
 */
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
1810 
/*
 * Schedule a deferred FBC enable for @crtc.  Cancels any previously
 * pending enable first; falls back to enabling synchronously if the
 * work item cannot be allocated.
 */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* platform without FBC support: nothing to do */
	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		/* no memory for deferral: enable immediately instead */
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
1850 
intel_disable_fbc(struct drm_device * dev)1851 void intel_disable_fbc(struct drm_device *dev)
1852 {
1853 	struct drm_i915_private *dev_priv = dev->dev_private;
1854 
1855 	intel_cancel_fbc_work(dev_priv);
1856 
1857 	if (!dev_priv->display.disable_fbc)
1858 		return;
1859 
1860 	dev_priv->display.disable_fbc(dev);
1861 	dev_priv->cfb_plane = -1;
1862 }
1863 
1864 /**
1865  * intel_update_fbc - enable/disable FBC as needed
1866  * @dev: the drm_device
1867  *
1868  * Set up the framebuffer compression hardware at mode set time.  We
1869  * enable it if possible:
1870  *   - plane A only (on pre-965)
1871  *   - no pixel mulitply/line duplication
1872  *   - no alpha buffer discard
1873  *   - no dual wide
1874  *   - framebuffer <= 2048 in width, 1536 in height
1875  *
1876  * We can't assume that any compression will take place (worst case),
1877  * so the compressed buffer has to be the same size as the uncompressed
1878  * one.  It also must reside (along with the line length buffer) in
1879  * stolen memory.
1880  *
1881  * We need to enable/disable FBC on a global basis.
1882  */
intel_update_fbc(struct drm_device * dev)1883 static void intel_update_fbc(struct drm_device *dev)
1884 {
1885 	struct drm_i915_private *dev_priv = dev->dev_private;
1886 	struct drm_crtc *crtc = NULL, *tmp_crtc;
1887 	struct intel_crtc *intel_crtc;
1888 	struct drm_framebuffer *fb;
1889 	struct intel_framebuffer *intel_fb;
1890 	struct drm_i915_gem_object *obj;
1891 	int enable_fbc;
1892 
1893 	DRM_DEBUG_KMS("\n");
1894 
1895 	if (!i915_powersave)
1896 		return;
1897 
1898 	if (!I915_HAS_FBC(dev))
1899 		return;
1900 
1901 	/*
1902 	 * If FBC is already on, we just have to verify that we can
1903 	 * keep it that way...
1904 	 * Need to disable if:
1905 	 *   - more than one pipe is active
1906 	 *   - changing FBC params (stride, fence, mode)
1907 	 *   - new fb is too large to fit in compressed buffer
1908 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
1909 	 */
1910 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1911 		if (tmp_crtc->enabled && tmp_crtc->fb) {
1912 			if (crtc) {
1913 				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1914 				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1915 				goto out_disable;
1916 			}
1917 			crtc = tmp_crtc;
1918 		}
1919 	}
1920 
1921 	if (!crtc || crtc->fb == NULL) {
1922 		DRM_DEBUG_KMS("no output, disabling\n");
1923 		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1924 		goto out_disable;
1925 	}
1926 
1927 	intel_crtc = to_intel_crtc(crtc);
1928 	fb = crtc->fb;
1929 	intel_fb = to_intel_framebuffer(fb);
1930 	obj = intel_fb->obj;
1931 
1932 	enable_fbc = i915_enable_fbc;
1933 	if (enable_fbc < 0) {
1934 		DRM_DEBUG_KMS("fbc set to per-chip default\n");
1935 		enable_fbc = 1;
1936 		if (INTEL_INFO(dev)->gen <= 6)
1937 			enable_fbc = 0;
1938 	}
1939 	if (!enable_fbc) {
1940 		DRM_DEBUG_KMS("fbc disabled per module param\n");
1941 		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1942 		goto out_disable;
1943 	}
1944 	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1945 		DRM_DEBUG_KMS("framebuffer too large, disabling "
1946 			      "compression\n");
1947 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1948 		goto out_disable;
1949 	}
1950 	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1951 	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1952 		DRM_DEBUG_KMS("mode incompatible with compression, "
1953 			      "disabling\n");
1954 		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1955 		goto out_disable;
1956 	}
1957 	if ((crtc->mode.hdisplay > 2048) ||
1958 	    (crtc->mode.vdisplay > 1536)) {
1959 		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1960 		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1961 		goto out_disable;
1962 	}
1963 	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1964 		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1965 		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1966 		goto out_disable;
1967 	}
1968 
1969 	/* The use of a CPU fence is mandatory in order to detect writes
1970 	 * by the CPU to the scanout and trigger updates to the FBC.
1971 	 */
1972 	if (obj->tiling_mode != I915_TILING_X ||
1973 	    obj->fence_reg == I915_FENCE_REG_NONE) {
1974 		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1975 		dev_priv->no_fbc_reason = FBC_NOT_TILED;
1976 		goto out_disable;
1977 	}
1978 
1979 	/* If the kernel debugger is active, always disable compression */
1980 	if (in_dbg_master())
1981 		goto out_disable;
1982 
1983 	/* If the scanout has not changed, don't modify the FBC settings.
1984 	 * Note that we make the fundamental assumption that the fb->obj
1985 	 * cannot be unpinned (and have its GTT offset and fence revoked)
1986 	 * without first being decoupled from the scanout and FBC disabled.
1987 	 */
1988 	if (dev_priv->cfb_plane == intel_crtc->plane &&
1989 	    dev_priv->cfb_fb == fb->base.id &&
1990 	    dev_priv->cfb_y == crtc->y)
1991 		return;
1992 
1993 	if (intel_fbc_enabled(dev)) {
1994 		/* We update FBC along two paths, after changing fb/crtc
1995 		 * configuration (modeswitching) and after page-flipping
1996 		 * finishes. For the latter, we know that not only did
1997 		 * we disable the FBC at the start of the page-flip
1998 		 * sequence, but also more than one vblank has passed.
1999 		 *
2000 		 * For the former case of modeswitching, it is possible
2001 		 * to switch between two FBC valid configurations
2002 		 * instantaneously so we do need to disable the FBC
2003 		 * before we can modify its control registers. We also
2004 		 * have to wait for the next vblank for that to take
2005 		 * effect. However, since we delay enabling FBC we can
2006 		 * assume that a vblank has passed since disabling and
2007 		 * that we can safely alter the registers in the deferred
2008 		 * callback.
2009 		 *
2010 		 * In the scenario that we go from a valid to invalid
2011 		 * and then back to valid FBC configuration we have
2012 		 * no strict enforcement that a vblank occurred since
2013 		 * disabling the FBC. However, along all current pipe
2014 		 * disabling paths we do need to wait for a vblank at
2015 		 * some point. And we wait before enabling FBC anyway.
2016 		 */
2017 		DRM_DEBUG_KMS("disabling active FBC for update\n");
2018 		intel_disable_fbc(dev);
2019 	}
2020 
2021 	intel_enable_fbc(crtc, 500);
2022 	return;
2023 
2024 out_disable:
2025 	/* Multiple disables should be harmless */
2026 	if (intel_fbc_enabled(dev)) {
2027 		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
2028 		intel_disable_fbc(dev);
2029 	}
2030 }
2031 
/*
 * Pin @obj into the GTT for scanout and, if it is tiled, install and
 * pin a fence register for it.
 *
 * The required GTT alignment depends on tiling and generation: linear
 * scanout needs 128KiB on Broadwater/Crestline, 4KiB on gen4+, 64KiB
 * on older parts; for X-tiled objects pin() itself applies the fence
 * alignment. Y tiling is rejected for scanout.
 *
 * Returns 0 on success or a negative error code; on error the object
 * is left unpinned.
 *
 * NOTE(review): mm.interruptible is set back to true unconditionally
 * on exit rather than restored to its previous value (contrast
 * intel_finish_fb) — confirm no caller relies on nesting.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Scanout setup must not be aborted by a signal mid-way. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
2089 
/*
 * Undo intel_pin_and_fence_fb_obj() in reverse order: release the
 * fence pin first, then unpin the object from the GTT. Called with
 * struct_mutex held (see intel_pipe_set_base).
 */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin(obj);
}
2095 
/*
 * Program the display plane registers (i9xx-style) so that @crtc
 * scans out @fb with the viewport origin at (@x, @y).
 *
 * Writes DSPCNTR (pixel format, tiling), DSPSTRIDE and the surface
 * address registers. Gen4+ parts take a surface base plus a tiled
 * x/y offset (DSPSURF/DSPTILEOFF/DSPADDR); older parts take a single
 * linear address (DSPADDR = base + byte offset).
 *
 * Returns 0 on success, -EINVAL for an unknown plane or pixel format.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	/* Only planes A and B exist on these parts. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}
	/* Gen4+ has a tiling bit in the plane control register. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	/* GTT base of the bo and the byte offset of pixel (x, y). */
	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	return 0;
}
2168 
/*
 * Program the display plane registers (Ironlake-style) so that @crtc
 * scans out @fb at viewport origin (@x, @y).
 *
 * Compared to i9xx_update_plane: a third plane is allowed, the set of
 * accepted bpp/depth combinations is stricter (16bpp must be depth 16;
 * 32bpp must be depth 24 or 30), trickle feed is force-disabled, and
 * the split surface/offset registers are always used.
 *
 * Returns 0 on success, -EINVAL for an unknown plane or unsupported
 * pixel format.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth != 16)
			return -EINVAL;

		dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		else
			return -EINVAL;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	/* GTT base of the bo and the byte offset of pixel (x, y). */
	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

	return 0;
}
2246 
2247 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2248 static int
intel_pipe_set_base_atomic(struct drm_crtc * crtc,struct drm_framebuffer * fb,int x,int y,enum mode_set_atomic state)2249 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2250 			   int x, int y, enum mode_set_atomic state)
2251 {
2252 	struct drm_device *dev = crtc->dev;
2253 	struct drm_i915_private *dev_priv = dev->dev_private;
2254 	int ret;
2255 
2256 	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2257 	if (ret)
2258 		return ret;
2259 
2260 	intel_update_fbc(dev);
2261 	intel_increase_pllclock(crtc);
2262 
2263 	return 0;
2264 }
2265 
/*
 * Wait for all outstanding activity on @old_fb — pending page flips
 * and GPU rendering — so that it is safe to unpin the framebuffer.
 * Returns the result of finishing GPU access (0, or an error which
 * should only occur on a hung GPU and may be safely ignored).
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Sleep until no flips remain queued on this object, or the GPU
	 * is wedged (in which case waiting longer is pointless). */
	wait_event(dev_priv->pending_flip_queue,
		   atomic_read(&dev_priv->mm.wedged) ||
		   atomic_read(&obj->pending_flip) == 0);

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}
2292 
/*
 * Set a new scanout base for @crtc at (@x, @y), pinning the new
 * framebuffer, flushing activity on @old_fb, flipping the plane
 * registers, and finally unpinning the old framebuffer.
 *
 * Also mirrors the new origin into the legacy SAREA for DRI1 clients,
 * when a master with a sarea is present.
 *
 * Returns 0 on success or a negative error code (pin/fence failure,
 * invalid plane, or plane-update failure).
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	/* Plane 2 only exists on Ivybridge; 0 and 1 everywhere. */
	switch (intel_crtc->plane) {
	case 0:
	case 1:
		break;
	case 2:
		if (IS_IVYBRIDGE(dev))
			break;
		/* fall through otherwise */
	default:
		DRM_ERROR("no plane for crtc\n");
		return -EINVAL;
	}

	/* struct_mutex guards pin/unpin and the plane update below. */
	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(crtc->fb)->obj,
					 NULL);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	/* Drain flips/rendering on the outgoing fb before switching. */
	if (old_fb)
		intel_finish_fb(old_fb);

	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
					 LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		/* Roll back the pin of the new fb on failure. */
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	if (old_fb) {
		/* Let the flip land before releasing the old scanout. */
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
	}

	mutex_unlock(&dev->struct_mutex);

	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	/* Publish the new origin for legacy (DRI1) userspace. */
	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}

	return 0;
}
2367 
/*
 * Program the eDP PLL frequency in DP_A for the given link @clock.
 * Clocks below 200000 select the 160MHz PLL (with the documented
 * register workaround); anything else selects 270MHz.
 * NOTE(review): @clock appears to be in kHz (DP link rates 162000 /
 * 270000) — confirm against callers.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* Allow the PLL to settle before use. */
	udelay(500);
}
2404 
/*
 * Switch the FDI link for @crtc's pipe out of the training patterns
 * into normal pixel transmission, enabling enhanced framing on both
 * the CPU-side transmitter and the PCH-side receiver.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a different bit layout for the train field. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2445 
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Two-phase hardware link training: pattern 1 until the receiver
 * reports bit lock, then pattern 2 until symbol lock. Each phase
 * polls FDI_RX_IIR up to 5 times; failure is logged but not fatal.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* Bits 21:19 encode the FDI lane count (lanes - 1). */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
			   FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	/* Poll for bit lock (phase 1 success), ack it by writing it back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock (phase 2 success). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
2542 
/* Voltage-swing / pre-emphasis combinations tried in order during
 * SNB/IVB FDI link training (values for FDI_LINK_TRAIN_VOL_EMP_MASK).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2549 
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Like the ILK version, but each of the two training phases retries
 * with up to four voltage-swing/pre-emphasis settings from
 * snb_b_fdi_train_param[], and CPT PCHs use a separate pattern field.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* Bits 21:19 encode the FDI lane count (lanes - 1). */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 1: retry each vswing/pre-emphasis level until bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 2: retry each vswing/pre-emphasis level until symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2670 
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Same two-phase train-and-retry structure as gen6_fdi_link_train,
 * but driven with the IVB-specific pattern bits, auto-train disabled,
 * and composite sync enabled on both ends of the link.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* Bits 21:19 encode the FDI lane count (lanes - 1). */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_AUTO;
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 1: retry each vswing/pre-emphasis level until bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		/* Re-read IIR in case bit lock arrived just after the
		 * first read. */
		if (temp & FDI_RX_BIT_LOCK ||
		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 2: retry each vswing/pre-emphasis level until symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2781 
/*
 * Enable the FDI PLLs for @crtc's pipe: program the receiver's TU
 * size and lane/BPC configuration, enable the PCH FDI RX PLL, switch
 * it from the raw clock to PCD clock, and finally enable the CPU FDI
 * TX PLL if it is not already running.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* Set lane count (bits 21:19) and mirror the pipe's BPC into
	 * the receiver (bits 18:16, sourced from PIPECONF). */
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2822 
ironlake_fdi_disable(struct drm_crtc * crtc)2823 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2824 {
2825 	struct drm_device *dev = crtc->dev;
2826 	struct drm_i915_private *dev_priv = dev->dev_private;
2827 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2828 	int pipe = intel_crtc->pipe;
2829 	u32 reg, temp;
2830 
2831 	/* disable CPU FDI tx and PCH FDI rx */
2832 	reg = FDI_TX_CTL(pipe);
2833 	temp = I915_READ(reg);
2834 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2835 	POSTING_READ(reg);
2836 
2837 	reg = FDI_RX_CTL(pipe);
2838 	temp = I915_READ(reg);
2839 	temp &= ~(0x7 << 16);
2840 	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2841 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2842 
2843 	POSTING_READ(reg);
2844 	udelay(100);
2845 
2846 	/* Ironlake workaround, disable clock pointer after downing FDI */
2847 	if (HAS_PCH_IBX(dev)) {
2848 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2849 		I915_WRITE(FDI_RX_CHICKEN(pipe),
2850 			   I915_READ(FDI_RX_CHICKEN(pipe) &
2851 				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
2852 	}
2853 
2854 	/* still set train pattern 1 */
2855 	reg = FDI_TX_CTL(pipe);
2856 	temp = I915_READ(reg);
2857 	temp &= ~FDI_LINK_TRAIN_NONE;
2858 	temp |= FDI_LINK_TRAIN_PATTERN_1;
2859 	I915_WRITE(reg, temp);
2860 
2861 	reg = FDI_RX_CTL(pipe);
2862 	temp = I915_READ(reg);
2863 	if (HAS_PCH_CPT(dev)) {
2864 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2865 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2866 	} else {
2867 		temp &= ~FDI_LINK_TRAIN_NONE;
2868 		temp |= FDI_LINK_TRAIN_PATTERN_1;
2869 	}
2870 	/* BPC in FDI rx is consistent with that in PIPECONF */
2871 	temp &= ~(0x07 << 16);
2872 	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2873 	I915_WRITE(reg, temp);
2874 
2875 	POSTING_READ(reg);
2876 	udelay(100);
2877 }
2878 
/*
 * When we disable a pipe, we need to clear any pending scanline wait events
 * to avoid hanging the ring, which we assume we are waiting on.
 */
static void intel_clear_scanline_wait(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	u32 tmp;

	if (IS_GEN2(dev))
		/* Can't break the hang on i8xx */
		return;

	ring = LP_RING(dev_priv);
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT)
		/* Writing the control value back with RING_WAIT set kicks
		 * the ring out of its wait state — presumably the bit is
		 * write-to-clear; confirm against Bspec. */
		I915_WRITE_CTL(ring, tmp);
}
2898 
intel_crtc_has_pending_flip(struct drm_crtc * crtc)2899 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2900 {
2901 	struct drm_device *dev = crtc->dev;
2902 	struct drm_i915_private *dev_priv = dev->dev_private;
2903 	unsigned long flags;
2904 	bool pending;
2905 
2906 	if (atomic_read(&dev_priv->mm.wedged))
2907 		return false;
2908 
2909 	spin_lock_irqsave(&dev->event_lock, flags);
2910 	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2911 	spin_unlock_irqrestore(&dev->event_lock, flags);
2912 
2913 	return pending;
2914 }
2915 
/*
 * Block until any pending page flip on @crtc has completed.
 *
 * No-op when the crtc has no framebuffer.  Sleeps on
 * dev_priv->pending_flip_queue until intel_crtc_has_pending_flip()
 * reports no outstanding work, then lets any remaining rendering to
 * the framebuffer finish under struct_mutex.
 */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (crtc->fb == NULL)
		return;

	wait_event(dev_priv->pending_flip_queue,
		   !intel_crtc_has_pending_flip(crtc));

	mutex_lock(&dev->struct_mutex);
	intel_finish_fb(crtc->fb);
	mutex_unlock(&dev->struct_mutex);
}
2931 
intel_crtc_driving_pch(struct drm_crtc * crtc)2932 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2933 {
2934 	struct drm_device *dev = crtc->dev;
2935 	struct drm_mode_config *mode_config = &dev->mode_config;
2936 	struct intel_encoder *encoder;
2937 
2938 	/*
2939 	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2940 	 * must be driven by its own crtc; no sharing is possible.
2941 	 */
2942 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2943 		if (encoder->base.crtc != crtc)
2944 			continue;
2945 
2946 		switch (encoder->type) {
2947 		case INTEL_OUTPUT_EDP:
2948 			if (!intel_encoder_is_pch_edp(&encoder->base))
2949 				return false;
2950 			continue;
2951 		}
2952 	}
2953 
2954 	return true;
2955 }
2956 
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, transc_sel;

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Transcoder C has no dedicated PLL; it shares PLL A or B
		 * depending on what this crtc was assigned. */
		transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
			TRANSC_DPLLB_SEL;

		/* Be sure PCH DPLL SEL is set */
		temp = I915_READ(PCH_DPLL_SEL);
		if (pipe == 0) {
			temp &= ~(TRANSA_DPLLB_SEL);
			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
		} else if (pipe == 1) {
			temp &= ~(TRANSB_DPLLB_SEL);
			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
		} else if (pipe == 2) {
			temp &= ~(TRANSC_DPLLB_SEL);
			temp |= (TRANSC_DPLL_ENABLE | transc_sel);
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	/* Copy the CPU pipe timings into the PCH transcoder registers. */
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));

	/* Training is done; switch the link to the normal pattern. */
	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		/* Extract the bpc field from PIPECONF (bits 7:5). */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to the DP port on this crtc. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
3050 
/*
 * Verify the pipe actually started scanning out after a CPT mode set.
 *
 * Samples PIPEDSL (current scanline) and, if it has not advanced
 * within the wait_for timeout, applies the auto-train generator stall
 * workaround via TRANS_CHICKEN2 and re-checks before reporting an
 * error.
 */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Without this, mode sets may fail silently on FDI */
		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
		udelay(250);
		I915_WRITE(tc2reg, 0);
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
	}
}
3068 
/*
 * Power up a pipe on Ironlake-class (PCH split) hardware.
 *
 * Sequence: FDI PLLs (only when driving a PCH port), panel fitter,
 * LUT, pipe, plane, then the PCH side (PLL select, transcoder, DP).
 * Idempotent via intel_crtc->active.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 temp;
	bool is_pch_port;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	/* Make sure the LVDS port is powered before lighting the pipe. */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		temp = I915_READ(PCH_LVDS);
		if ((temp & LVDS_PORT_EN) == 0)
			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
	}

	is_pch_port = intel_crtc_driving_pch(crtc);

	if (is_pch_port)
		ironlake_fdi_pll_enable(crtc);
	else
		ironlake_fdi_disable(crtc);

	/* Enable panel fitting for LVDS */
	if (dev_priv->pch_pf_size &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
	}

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_enable_pipe(dev_priv, pipe, is_pch_port);
	intel_enable_plane(dev_priv, plane, pipe);

	if (is_pch_port)
		ironlake_pch_enable(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_crtc_update_cursor(crtc, true);
}
3132 
/*
 * Shut down a pipe on Ironlake-class hardware: plane, pipe, panel
 * fitter, FDI link, PCH ports, transcoder, PCH PLL and finally the
 * FDI PLLs — roughly the inverse of ironlake_crtc_enable().
 * Idempotent via intel_crtc->active.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;

	if (!intel_crtc->active)
		return;

	/* Let outstanding flips finish before touching the hardware. */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_update_cursor(crtc, false);

	intel_disable_plane(dev_priv, plane, pipe);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_pipe(dev_priv, pipe);

	/* Disable PF */
	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);

	ironlake_fdi_disable(crtc);

	/* This is a horrible layering violation; we should be doing this in
	 * the connector/encoder ->prepare instead, but we don't always have
	 * enough information there about the config to know whether it will
	 * actually be necessary or just cause undesired flicker.
	 */
	intel_disable_pch_ports(dev_priv, pipe);

	intel_disable_transcoder(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* disable TRANS_DP_CTL */
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
		temp |= TRANS_DP_PORT_SEL_NONE;
		I915_WRITE(reg, temp);

		/* disable DPLL_SEL */
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
			break;
		case 1:
			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
			break;
		case 2:
			/* C shares PLL A or B */
			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
			break;
		default:
			BUG(); /* wtf */
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* disable PCH DPLL */
	if (!intel_crtc->no_pll)
		intel_disable_pch_pll(dev_priv, pipe);

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	intel_clear_scanline_wait(dev);
	mutex_unlock(&dev->struct_mutex);
}
3231 
ironlake_crtc_dpms(struct drm_crtc * crtc,int mode)3232 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3233 {
3234 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3235 	int pipe = intel_crtc->pipe;
3236 	int plane = intel_crtc->plane;
3237 
3238 	/* XXX: When our outputs are all unaware of DPMS modes other than off
3239 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3240 	 */
3241 	switch (mode) {
3242 	case DRM_MODE_DPMS_ON:
3243 	case DRM_MODE_DPMS_STANDBY:
3244 	case DRM_MODE_DPMS_SUSPEND:
3245 		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3246 		ironlake_crtc_enable(crtc);
3247 		break;
3248 
3249 	case DRM_MODE_DPMS_OFF:
3250 		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3251 		ironlake_crtc_disable(crtc);
3252 		break;
3253 	}
3254 }
3255 
/*
 * Shut the overlay off when its crtc is being disabled.  Enabling is
 * deliberately a no-op: userspace switches the overlay back on itself,
 * since in most cases it has to recompute where to put it anyway.
 */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;

	if (enable || !intel_crtc->overlay)
		return;

	dev = intel_crtc->base.dev;
	dev_priv = dev->dev_private;

	/* Best-effort switch-off; must not be interrupted by signals. */
	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.interruptible = false;
	(void) intel_overlay_switch_off(intel_crtc->overlay);
	dev_priv->mm.interruptible = true;
	mutex_unlock(&dev->struct_mutex);
}
3273 
/*
 * Power up a pipe on pre-Ironlake (gen2-4) hardware: PLL first, then
 * pipe, then plane, followed by LUT, FBC, overlay and cursor.
 * Idempotent via intel_crtc->active.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	intel_enable_pll(dev_priv, pipe);
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	intel_crtc_update_cursor(crtc, true);
}
3299 
/*
 * Power down a pipe on pre-Ironlake hardware, tearing things down in
 * roughly the reverse order of i9xx_crtc_enable().
 * Idempotent via intel_crtc->active.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 pctl;

	if (!intel_crtc->active)
		return;

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);

	/* Disable panel fitter if it is on this pipe. */
	pctl = I915_READ(PFIT_CONTROL);
	if ((pctl & PFIT_ENABLE) &&
	    ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
		I915_WRITE(PFIT_CONTROL, 0);

	intel_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_fbc(dev);
	intel_update_watermarks(dev);
	intel_clear_scanline_wait(dev);
}
3337 
i9xx_crtc_dpms(struct drm_crtc * crtc,int mode)3338 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3339 {
3340 	/* XXX: When our outputs are all unaware of DPMS modes other than off
3341 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3342 	 */
3343 	switch (mode) {
3344 	case DRM_MODE_DPMS_ON:
3345 	case DRM_MODE_DPMS_STANDBY:
3346 	case DRM_MODE_DPMS_SUSPEND:
3347 		i9xx_crtc_enable(crtc);
3348 		break;
3349 	case DRM_MODE_DPMS_OFF:
3350 		i9xx_crtc_disable(crtc);
3351 		break;
3352 	}
3353 }
3354 
/**
 * Sets the power management mode of the pipe and plane.
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool enabled;

	/* DPMS state is cached; nothing to do when unchanged. */
	if (intel_crtc->dpms_mode == mode)
		return;

	intel_crtc->dpms_mode = mode;

	/* Dispatch to the platform hook (i9xx_crtc_dpms /
	 * ironlake_crtc_dpms). */
	dev_priv->display.dpms(crtc, mode);

	/* The remainder only mirrors the new state into the legacy
	 * SAREA for old (DRI1) userspace. */
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		/* The SAREA only has slots for pipes A and B. */
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}
3397 
/*
 * Fully switch off a crtc and release its scanout buffer (used when a
 * crtc is dropped from the configuration, not merely DPMS'd off).
 */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
	/* Sanity-check that the hardware really shut down. */
	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	/* Drop the pin taken when the fb became the scanout target. */
	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
	}
}
3413 
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will changes)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	/* Simply shut the pipe down; commit will bring it back up. */
	i9xx_crtc_disable(crtc);
}
3426 
/* Finish a mode set by powering the pipe back up (gen2-4). */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
3431 
/* Prepare for a mode set on PCH-split hardware: shut the pipe down. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
3436 
/* Finish a mode set by powering the pipe back up (PCH split). */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
3441 
/* Encoder helper ->prepare: turn the encoder off ahead of a mode set. */
void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
3448 
/* Encoder helper ->commit: switch the encoder back on after a mode set. */
void intel_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);

	/* On CPT the pipe may silently fail to start; double-check it. */
	if (HAS_PCH_CPT(dev))
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
3462 
/* Common destroy hook: tear down the drm encoder and free the
 * containing intel_encoder allocation. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
3470 
intel_crtc_mode_fixup(struct drm_crtc * crtc,struct drm_display_mode * mode,struct drm_display_mode * adjusted_mode)3471 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3472 				  struct drm_display_mode *mode,
3473 				  struct drm_display_mode *adjusted_mode)
3474 {
3475 	struct drm_device *dev = crtc->dev;
3476 
3477 	if (HAS_PCH_SPLIT(dev)) {
3478 		/* FDI link clock is fixed at 2.7G */
3479 		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3480 			return false;
3481 	}
3482 
3483 	/* All interlaced capable intel hw wants timings in frames. Note though
3484 	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3485 	 * timings, so we need to be careful not to clobber these.*/
3486 	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3487 		drm_mode_set_crtcinfo(adjusted_mode, 0);
3488 
3489 	return true;
3490 }
3491 
/* Core display clock in kHz for i945-class chipsets (fixed 400 MHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
3496 
/* Core display clock in kHz for i915-class chipsets (fixed 333 MHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
3501 
/* Core display clock in kHz for the remaining i9xx chipsets (200 MHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
3506 
i915gm_get_display_clock_speed(struct drm_device * dev)3507 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3508 {
3509 	u16 gcfgc = 0;
3510 
3511 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3512 
3513 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3514 		return 133000;
3515 	else {
3516 		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3517 		case GC_DISPLAY_CLOCK_333_MHZ:
3518 			return 333000;
3519 		default:
3520 		case GC_DISPLAY_CLOCK_190_200_MHZ:
3521 			return 190000;
3522 		}
3523 	}
3524 }
3525 
/* Core display clock in kHz for i865-class chipsets (fixed 266 MHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
3530 
/* Core display clock in kHz for i855-class chipsets. */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 *
	 * NOTE(review): hpllcc is never actually read from the clock
	 * control register here, so the switch always sees 0 —
	 * presumably the intended "default/high speed" case; confirm
	 * which GC_CLOCK_* value is 0.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3550 
/* Core display clock in kHz for i830-class chipsets (fixed 133 MHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
3555 
/* M/N ratio values programmed into the FDI data and link registers;
 * filled in by ironlake_compute_m_n(). */
struct fdi_m_n {
	u32        tu;		/* transfer unit size */
	u32        gmch_m;	/* data M: bits per pixel * pixel clock */
	u32        gmch_n;	/* data N: link clock * lane count * 8 */
	u32        link_m;	/* link M: pixel clock */
	u32        link_n;	/* link N: link clock */
};
3563 
3564 static void
fdi_reduce_ratio(u32 * num,u32 * den)3565 fdi_reduce_ratio(u32 *num, u32 *den)
3566 {
3567 	while (*num > 0xffffff || *den > 0xffffff) {
3568 		*num >>= 1;
3569 		*den >>= 1;
3570 	}
3571 }
3572 
/*
 * Compute the FDI data and link M/N values for the given pixel format,
 * lane count and clocks, reduced to fit the 24-bit register fields.
 */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* Data M/N: payload bits vs total link capacity.
	 * BUG_ON(pixel_clock > INT_MAX / 36); */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	/* Link M/N: pixel clock to link clock ratio. */
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3588 
3589 
/* Per-platform FIFO/watermark tuning parameters; consumed by
 * intel_calculate_wm(). */
struct intel_watermark_params {
	unsigned long fifo_size;	/* total FIFO size (entries) */
	unsigned long max_wm;		/* upper clamp for the result */
	unsigned long default_wm;	/* fallback when the result is <= 0 */
	unsigned long guard_size;	/* extra entries held in reserve */
	unsigned long cacheline_size;	/* FIFO line size used for rounding */
};
3597 
3598 /* Pineview has different values for various configs */
3599 static const struct intel_watermark_params pineview_display_wm = {
3600 	PINEVIEW_DISPLAY_FIFO,
3601 	PINEVIEW_MAX_WM,
3602 	PINEVIEW_DFT_WM,
3603 	PINEVIEW_GUARD_WM,
3604 	PINEVIEW_FIFO_LINE_SIZE
3605 };
3606 static const struct intel_watermark_params pineview_display_hplloff_wm = {
3607 	PINEVIEW_DISPLAY_FIFO,
3608 	PINEVIEW_MAX_WM,
3609 	PINEVIEW_DFT_HPLLOFF_WM,
3610 	PINEVIEW_GUARD_WM,
3611 	PINEVIEW_FIFO_LINE_SIZE
3612 };
3613 static const struct intel_watermark_params pineview_cursor_wm = {
3614 	PINEVIEW_CURSOR_FIFO,
3615 	PINEVIEW_CURSOR_MAX_WM,
3616 	PINEVIEW_CURSOR_DFT_WM,
3617 	PINEVIEW_CURSOR_GUARD_WM,
3618 	PINEVIEW_FIFO_LINE_SIZE,
3619 };
3620 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3621 	PINEVIEW_CURSOR_FIFO,
3622 	PINEVIEW_CURSOR_MAX_WM,
3623 	PINEVIEW_CURSOR_DFT_WM,
3624 	PINEVIEW_CURSOR_GUARD_WM,
3625 	PINEVIEW_FIFO_LINE_SIZE
3626 };
3627 static const struct intel_watermark_params g4x_wm_info = {
3628 	G4X_FIFO_SIZE,
3629 	G4X_MAX_WM,
3630 	G4X_MAX_WM,
3631 	2,
3632 	G4X_FIFO_LINE_SIZE,
3633 };
3634 static const struct intel_watermark_params g4x_cursor_wm_info = {
3635 	I965_CURSOR_FIFO,
3636 	I965_CURSOR_MAX_WM,
3637 	I965_CURSOR_DFT_WM,
3638 	2,
3639 	G4X_FIFO_LINE_SIZE,
3640 };
3641 static const struct intel_watermark_params i965_cursor_wm_info = {
3642 	I965_CURSOR_FIFO,
3643 	I965_CURSOR_MAX_WM,
3644 	I965_CURSOR_DFT_WM,
3645 	2,
3646 	I915_FIFO_LINE_SIZE,
3647 };
3648 static const struct intel_watermark_params i945_wm_info = {
3649 	I945_FIFO_SIZE,
3650 	I915_MAX_WM,
3651 	1,
3652 	2,
3653 	I915_FIFO_LINE_SIZE
3654 };
3655 static const struct intel_watermark_params i915_wm_info = {
3656 	I915_FIFO_SIZE,
3657 	I915_MAX_WM,
3658 	1,
3659 	2,
3660 	I915_FIFO_LINE_SIZE
3661 };
3662 static const struct intel_watermark_params i855_wm_info = {
3663 	I855GM_FIFO_SIZE,
3664 	I915_MAX_WM,
3665 	1,
3666 	2,
3667 	I830_FIFO_LINE_SIZE
3668 };
3669 static const struct intel_watermark_params i830_wm_info = {
3670 	I830_FIFO_SIZE,
3671 	I915_MAX_WM,
3672 	1,
3673 	2,
3674 	I830_FIFO_LINE_SIZE
3675 };
3676 
3677 static const struct intel_watermark_params ironlake_display_wm_info = {
3678 	ILK_DISPLAY_FIFO,
3679 	ILK_DISPLAY_MAXWM,
3680 	ILK_DISPLAY_DFTWM,
3681 	2,
3682 	ILK_FIFO_LINE_SIZE
3683 };
3684 static const struct intel_watermark_params ironlake_cursor_wm_info = {
3685 	ILK_CURSOR_FIFO,
3686 	ILK_CURSOR_MAXWM,
3687 	ILK_CURSOR_DFTWM,
3688 	2,
3689 	ILK_FIFO_LINE_SIZE
3690 };
3691 static const struct intel_watermark_params ironlake_display_srwm_info = {
3692 	ILK_DISPLAY_SR_FIFO,
3693 	ILK_DISPLAY_MAX_SRWM,
3694 	ILK_DISPLAY_DFT_SRWM,
3695 	2,
3696 	ILK_FIFO_LINE_SIZE
3697 };
3698 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3699 	ILK_CURSOR_SR_FIFO,
3700 	ILK_CURSOR_MAX_SRWM,
3701 	ILK_CURSOR_DFT_SRWM,
3702 	2,
3703 	ILK_FIFO_LINE_SIZE
3704 };
3705 
3706 static const struct intel_watermark_params sandybridge_display_wm_info = {
3707 	SNB_DISPLAY_FIFO,
3708 	SNB_DISPLAY_MAXWM,
3709 	SNB_DISPLAY_DFTWM,
3710 	2,
3711 	SNB_FIFO_LINE_SIZE
3712 };
3713 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3714 	SNB_CURSOR_FIFO,
3715 	SNB_CURSOR_MAXWM,
3716 	SNB_CURSOR_DFTWM,
3717 	2,
3718 	SNB_FIFO_LINE_SIZE
3719 };
3720 static const struct intel_watermark_params sandybridge_display_srwm_info = {
3721 	SNB_DISPLAY_SR_FIFO,
3722 	SNB_DISPLAY_MAX_SRWM,
3723 	SNB_DISPLAY_DFT_SRWM,
3724 	2,
3725 	SNB_FIFO_LINE_SIZE
3726 };
3727 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3728 	SNB_CURSOR_SR_FIFO,
3729 	SNB_CURSOR_MAX_SRWM,
3730 	SNB_CURSOR_DFT_SRWM,
3731 	2,
3732 	SNB_FIFO_LINE_SIZE
3733 };
3734 
3735 
3736 /**
3737  * intel_calculate_wm - calculate watermark level
3738  * @clock_in_khz: pixel clock
3739  * @wm: chip FIFO params
3740  * @pixel_size: display pixel size
3741  * @latency_ns: memory latency for the platform
3742  *
3743  * Calculate the watermark level (the level at which the display plane will
3744  * start fetching from memory again).  Each chip has a different display
3745  * FIFO size and allocation, so the caller needs to figure that out and pass
3746  * in the correct intel_watermark_params structure.
3747  *
3748  * As the pixel clock runs, the FIFO will be drained at a rate that depends
3749  * on the pixel size.  When it reaches the watermark level, it'll start
3750  * fetching FIFO line sized based chunks from memory until the FIFO fills
3751  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3752  * will occur, and a display engine hang could result.
3753  */
intel_calculate_wm(unsigned long clock_in_khz,const struct intel_watermark_params * wm,int fifo_size,int pixel_size,unsigned long latency_ns)3754 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3755 					const struct intel_watermark_params *wm,
3756 					int fifo_size,
3757 					int pixel_size,
3758 					unsigned long latency_ns)
3759 {
3760 	long entries_required, wm_size;
3761 
3762 	/*
3763 	 * Note: we need to make sure we don't overflow for various clock &
3764 	 * latency values.
3765 	 * clocks go from a few thousand to several hundred thousand.
3766 	 * latency is usually a few thousand
3767 	 */
3768 	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3769 		1000;
3770 	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3771 
3772 	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3773 
3774 	wm_size = fifo_size - (entries_required + wm->guard_size);
3775 
3776 	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3777 
3778 	/* Don't promote wm_size to unsigned... */
3779 	if (wm_size > (long)wm->max_wm)
3780 		wm_size = wm->max_wm;
3781 	if (wm_size <= 0)
3782 		wm_size = wm->default_wm;
3783 	return wm_size;
3784 }
3785 
/* Self-refresh latency lookup entry, keyed by chipset class, memory
 * type, FSB frequency and memory frequency. */
struct cxsr_latency {
	int is_desktop;			/* 1 = desktop part, 0 = mobile */
	int is_ddr3;			/* 1 = DDR3, 0 = DDR2 */
	unsigned long fsb_freq;		/* FSB frequency (MHz) */
	unsigned long mem_freq;		/* memory frequency (MHz) */
	unsigned long display_sr;	/* display self-refresh latency */
	unsigned long display_hpll_disable; /* ditto, with HPLL disabled */
	unsigned long cursor_sr;	/* cursor self-refresh latency */
	unsigned long cursor_hpll_disable;  /* ditto, with HPLL disabled */
};
3796 
/* Columns: {is_desktop, is_ddr3, fsb_freq, mem_freq,
 *           display_sr, display_hpll_disable, cursor_sr,
 *           cursor_hpll_disable} — see struct cxsr_latency. */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	/* NOTE(review): 34106 below breaks the cursor_sr+30000 pattern
	 * every other row follows (would be 34103) — confirm against
	 * the original latency table before "fixing". */
	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3834 
intel_get_cxsr_latency(int is_desktop,int is_ddr3,int fsb,int mem)3835 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3836 							 int is_ddr3,
3837 							 int fsb,
3838 							 int mem)
3839 {
3840 	const struct cxsr_latency *latency;
3841 	int i;
3842 
3843 	if (fsb == 0 || mem == 0)
3844 		return NULL;
3845 
3846 	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3847 		latency = &cxsr_latency_table[i];
3848 		if (is_desktop == latency->is_desktop &&
3849 		    is_ddr3 == latency->is_ddr3 &&
3850 		    fsb == latency->fsb_freq && mem == latency->mem_freq)
3851 			return latency;
3852 	}
3853 
3854 	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3855 
3856 	return NULL;
3857 }
3858 
/* Turn off Pineview self-refresh (CxSR) by clearing its DSPFW3 enable bit. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3866 
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000; /* default WM0 fetch latency, in ns */
3882 
i9xx_get_fifo_size(struct drm_device * dev,int plane)3883 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3884 {
3885 	struct drm_i915_private *dev_priv = dev->dev_private;
3886 	uint32_t dsparb = I915_READ(DSPARB);
3887 	int size;
3888 
3889 	size = dsparb & 0x7f;
3890 	if (plane)
3891 		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3892 
3893 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3894 		      plane ? "B" : "A", size);
3895 
3896 	return size;
3897 }
3898 
i85x_get_fifo_size(struct drm_device * dev,int plane)3899 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3900 {
3901 	struct drm_i915_private *dev_priv = dev->dev_private;
3902 	uint32_t dsparb = I915_READ(DSPARB);
3903 	int size;
3904 
3905 	size = dsparb & 0x1ff;
3906 	if (plane)
3907 		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3908 	size >>= 1; /* Convert to cachelines */
3909 
3910 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3911 		      plane ? "B" : "A", size);
3912 
3913 	return size;
3914 }
3915 
i845_get_fifo_size(struct drm_device * dev,int plane)3916 static int i845_get_fifo_size(struct drm_device *dev, int plane)
3917 {
3918 	struct drm_i915_private *dev_priv = dev->dev_private;
3919 	uint32_t dsparb = I915_READ(DSPARB);
3920 	int size;
3921 
3922 	size = dsparb & 0x7f;
3923 	size >>= 2; /* Convert to cachelines */
3924 
3925 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3926 		      plane ? "B" : "A",
3927 		      size);
3928 
3929 	return size;
3930 }
3931 
i830_get_fifo_size(struct drm_device * dev,int plane)3932 static int i830_get_fifo_size(struct drm_device *dev, int plane)
3933 {
3934 	struct drm_i915_private *dev_priv = dev->dev_private;
3935 	uint32_t dsparb = I915_READ(DSPARB);
3936 	int size;
3937 
3938 	size = dsparb & 0x7f;
3939 	size >>= 1; /* Convert to cachelines */
3940 
3941 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3942 		      plane ? "B" : "A", size);
3943 
3944 	return size;
3945 }
3946 
single_enabled_crtc(struct drm_device * dev)3947 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3948 {
3949 	struct drm_crtc *crtc, *enabled = NULL;
3950 
3951 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3952 		if (crtc->enabled && crtc->fb) {
3953 			if (enabled)
3954 				return NULL;
3955 			enabled = crtc;
3956 		}
3957 	}
3958 
3959 	return enabled;
3960 }
3961 
/*
 * Program the Pineview self-refresh (CxSR) watermarks.
 *
 * CxSR is only usable with a single active CRTC; in that case compute
 * display and cursor watermarks for both the normal and HPLL-off
 * states and enable self-refresh, otherwise disable it.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	/* Latencies depend on the memory configuration; bail if unknown */
	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): fifo_size is taken from the display params,
		 * not pineview_cursor_wm — looks deliberate but worth
		 * confirming against the Pineview documentation. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		/* Zero or multiple pipes active: CxSR must stay off */
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
4030 
/*
 * Compute the WM0 (normal-operation) plane and cursor watermarks for
 * one pipe using the G4x method.
 *
 * Fills *plane_wm/*cursor_wm and returns true when the pipe is active;
 * returns false with guard-size defaults when it has no framebuffer or
 * is disabled, so the caller can skip it.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		/* Inactive pipe: report the minimum (guard) watermarks */
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;	/* dot clock, kHz */
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	/* Extra allowance when the FIFO is small, per the tlb_miss term */
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	/* Round the latency up to whole scanlines */
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;	/* 64 taken as cursor width */
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
4081 
4082 /*
4083  * Check the wm result.
4084  *
4085  * If any calculated watermark values is larger than the maximum value that
4086  * can be programmed into the associated watermark register, that watermark
4087  * must be disabled.
4088  */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	/* Plane watermark beyond what the register field can hold */
	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	/* Same limit check for the cursor watermark */
	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	/* Both zero means the computation produced nothing usable */
	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
4116 
/*
 * Compute the G4x self-refresh watermarks for the given plane.
 *
 * Fills *display_wm/*cursor_wm and returns g4x_check_srwm()'s verdict;
 * returns false with zeroed watermarks when latency_ns is 0.
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	/* Latency rounded up to full scanlines */
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;	/* bytes per displayed line */

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
4162 
/* True when exactly one bit of the plane mask is set */
#define single_plane_enabled(mask) is_power_of_2(mask)
4164 
/*
 * Program WM0 watermarks for both pipes on G4x and, when exactly one
 * plane is active, the self-refresh watermarks as well.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	/* self-refresh has much higher latency than normal fetches */
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;	/* bitmask of active planes */

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* Self-refresh is only valid with exactly one plane enabled */
	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		/* NOTE(review): plain write discards other FW_BLC_SELF
		 * bits, unlike the read-modify-write in the else branch —
		 * confirm no other bits matter here. */
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4215 
/*
 * Program the i965 watermarks.  The normal plane/cursor FIFO watermarks
 * are fixed at 8; only the self-refresh watermarks are computed, and
 * only when a single CRTC is active.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;		/* minimal safe fallback values */
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;	/* keep within the 9-bit register field */
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* Same computation for the cursor plane (64 used as width) */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
					  i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4280 
/*
 * Program FW_BLC watermarks for gen2/gen3 parts: per-plane WM0 values,
 * an aggressive overlay default, and (on parts with FW_BLC self-refresh)
 * a self-refresh watermark when exactly one plane is active.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	/* Pick the watermark parameters for this generation */
	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	/* Plane A: compute WM0, or use the guard default when idle */
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	/* Plane B likewise; 'enabled' ends up NULL unless exactly one
	 * plane is active (self-refresh needs a single plane) */
	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	/* Pack plane A/B watermarks and overlay watermark into FW_BLC/FW_BLC2 */
	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh now that the watermarks are programmed */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
4391 
i830_update_wm(struct drm_device * dev)4392 static void i830_update_wm(struct drm_device *dev)
4393 {
4394 	struct drm_i915_private *dev_priv = dev->dev_private;
4395 	struct drm_crtc *crtc;
4396 	uint32_t fwater_lo;
4397 	int planea_wm;
4398 
4399 	crtc = single_enabled_crtc(dev);
4400 	if (crtc == NULL)
4401 		return;
4402 
4403 	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4404 				       dev_priv->display.get_fifo_size(dev, 0),
4405 				       crtc->fb->bits_per_pixel / 8,
4406 				       latency_ns);
4407 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4408 	fwater_lo |= (3<<8) | planea_wm;
4409 
4410 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4411 
4412 	I915_WRITE(FW_BLC, fwater_lo);
4413 }
4414 
/* Ironlake WM0 (LP0) latencies, in nanoseconds (fed to g4x_compute_wm0) */
#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300
4417 
4418 /*
4419  * Check the wm result.
4420  *
4421  * If any calculated watermark values is larger than the maximum value that
4422  * can be programmed into the associated watermark register, that watermark
4423  * must be disabled.
4424  */
ironlake_check_srwm(struct drm_device * dev,int level,int fbc_wm,int display_wm,int cursor_wm,const struct intel_watermark_params * display,const struct intel_watermark_params * cursor)4425 static bool ironlake_check_srwm(struct drm_device *dev, int level,
4426 				int fbc_wm, int display_wm, int cursor_wm,
4427 				const struct intel_watermark_params *display,
4428 				const struct intel_watermark_params *cursor)
4429 {
4430 	struct drm_i915_private *dev_priv = dev->dev_private;
4431 
4432 	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4433 		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4434 
4435 	if (fbc_wm > SNB_FBC_MAX_SRWM) {
4436 		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4437 			      fbc_wm, SNB_FBC_MAX_SRWM, level);
4438 
4439 		/* fbc has it's own way to disable FBC WM */
4440 		I915_WRITE(DISP_ARB_CTL,
4441 			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4442 		return false;
4443 	}
4444 
4445 	if (display_wm > display->max_wm) {
4446 		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4447 			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
4448 		return false;
4449 	}
4450 
4451 	if (cursor_wm > cursor->max_wm) {
4452 		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4453 			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4454 		return false;
4455 	}
4456 
4457 	if (!(fbc_wm || display_wm || cursor_wm)) {
4458 		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4459 		return false;
4460 	}
4461 
4462 	return true;
4463 }
4464 
/*
 * Compute watermark values of WM[1-3] (Ironlake/SNB low-power levels)
 * for the given plane: primary display, FBC, and cursor.
 *
 * Fills *fbc_wm/*display_wm/*cursor_wm and returns the verdict of
 * ironlake_check_srwm(); returns false with zeroed watermarks when
 * latency_ns is 0 (level unusable).
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	/* Latency rounded up to whole scanlines */
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
4518 
/*
 * Program Ironlake watermarks: WM0 per pipe, then WM1/WM2 low-power
 * levels — but only when a single plane is active, since self-refresh
 * levels are per-plane.  WM3 is left disabled on ILK.
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of active pipes */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	/* Convert the bitmask to the index of the single active pipe */
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
4601 
sandybridge_update_wm(struct drm_device * dev)4602 void sandybridge_update_wm(struct drm_device *dev)
4603 {
4604 	struct drm_i915_private *dev_priv = dev->dev_private;
4605 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4606 	u32 val;
4607 	int fbc_wm, plane_wm, cursor_wm;
4608 	unsigned int enabled;
4609 
4610 	enabled = 0;
4611 	if (g4x_compute_wm0(dev, 0,
4612 			    &sandybridge_display_wm_info, latency,
4613 			    &sandybridge_cursor_wm_info, latency,
4614 			    &plane_wm, &cursor_wm)) {
4615 		val = I915_READ(WM0_PIPEA_ILK);
4616 		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4617 		I915_WRITE(WM0_PIPEA_ILK, val |
4618 			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4619 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4620 			      " plane %d, " "cursor: %d\n",
4621 			      plane_wm, cursor_wm);
4622 		enabled |= 1;
4623 	}
4624 
4625 	if (g4x_compute_wm0(dev, 1,
4626 			    &sandybridge_display_wm_info, latency,
4627 			    &sandybridge_cursor_wm_info, latency,
4628 			    &plane_wm, &cursor_wm)) {
4629 		val = I915_READ(WM0_PIPEB_ILK);
4630 		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4631 		I915_WRITE(WM0_PIPEB_ILK, val |
4632 			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4633 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4634 			      " plane %d, cursor: %d\n",
4635 			      plane_wm, cursor_wm);
4636 		enabled |= 2;
4637 	}
4638 
4639 	/* IVB has 3 pipes */
4640 	if (IS_IVYBRIDGE(dev) &&
4641 	    g4x_compute_wm0(dev, 2,
4642 			    &sandybridge_display_wm_info, latency,
4643 			    &sandybridge_cursor_wm_info, latency,
4644 			    &plane_wm, &cursor_wm)) {
4645 		val = I915_READ(WM0_PIPEC_IVB);
4646 		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4647 		I915_WRITE(WM0_PIPEC_IVB, val |
4648 			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4649 		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
4650 			      " plane %d, cursor: %d\n",
4651 			      plane_wm, cursor_wm);
4652 		enabled |= 3;
4653 	}
4654 
4655 	/*
4656 	 * Calculate and update the self-refresh watermark only when one
4657 	 * display plane is used.
4658 	 *
4659 	 * SNB support 3 levels of watermark.
4660 	 *
4661 	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
4662 	 * and disabled in the descending order
4663 	 *
4664 	 */
4665 	I915_WRITE(WM3_LP_ILK, 0);
4666 	I915_WRITE(WM2_LP_ILK, 0);
4667 	I915_WRITE(WM1_LP_ILK, 0);
4668 
4669 	if (!single_plane_enabled(enabled) ||
4670 	    dev_priv->sprite_scaling_enabled)
4671 		return;
4672 	enabled = ffs(enabled) - 1;
4673 
4674 	/* WM1 */
4675 	if (!ironlake_compute_srwm(dev, 1, enabled,
4676 				   SNB_READ_WM1_LATENCY() * 500,
4677 				   &sandybridge_display_srwm_info,
4678 				   &sandybridge_cursor_srwm_info,
4679 				   &fbc_wm, &plane_wm, &cursor_wm))
4680 		return;
4681 
4682 	I915_WRITE(WM1_LP_ILK,
4683 		   WM1_LP_SR_EN |
4684 		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4685 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4686 		   (plane_wm << WM1_LP_SR_SHIFT) |
4687 		   cursor_wm);
4688 
4689 	/* WM2 */
4690 	if (!ironlake_compute_srwm(dev, 2, enabled,
4691 				   SNB_READ_WM2_LATENCY() * 500,
4692 				   &sandybridge_display_srwm_info,
4693 				   &sandybridge_cursor_srwm_info,
4694 				   &fbc_wm, &plane_wm, &cursor_wm))
4695 		return;
4696 
4697 	I915_WRITE(WM2_LP_ILK,
4698 		   WM2_LP_EN |
4699 		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4700 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4701 		   (plane_wm << WM1_LP_SR_SHIFT) |
4702 		   cursor_wm);
4703 
4704 	/* WM3 */
4705 	if (!ironlake_compute_srwm(dev, 3, enabled,
4706 				   SNB_READ_WM3_LATENCY() * 500,
4707 				   &sandybridge_display_srwm_info,
4708 				   &sandybridge_cursor_srwm_info,
4709 				   &fbc_wm, &plane_wm, &cursor_wm))
4710 		return;
4711 
4712 	I915_WRITE(WM3_LP_ILK,
4713 		   WM3_LP_EN |
4714 		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4715 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4716 		   (plane_wm << WM1_LP_SR_SHIFT) |
4717 		   cursor_wm);
4718 }
4719 
4720 static bool
sandybridge_compute_sprite_wm(struct drm_device * dev,int plane,uint32_t sprite_width,int pixel_size,const struct intel_watermark_params * display,int display_latency_ns,int * sprite_wm)4721 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4722 			      uint32_t sprite_width, int pixel_size,
4723 			      const struct intel_watermark_params *display,
4724 			      int display_latency_ns, int *sprite_wm)
4725 {
4726 	struct drm_crtc *crtc;
4727 	int clock;
4728 	int entries, tlb_miss;
4729 
4730 	crtc = intel_get_crtc_for_plane(dev, plane);
4731 	if (crtc->fb == NULL || !crtc->enabled) {
4732 		*sprite_wm = display->guard_size;
4733 		return false;
4734 	}
4735 
4736 	clock = crtc->mode.clock;
4737 
4738 	/* Use the small buffer method to calculate the sprite watermark */
4739 	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4740 	tlb_miss = display->fifo_size*display->cacheline_size -
4741 		sprite_width * 8;
4742 	if (tlb_miss > 0)
4743 		entries += tlb_miss;
4744 	entries = DIV_ROUND_UP(entries, display->cacheline_size);
4745 	*sprite_wm = entries + display->guard_size;
4746 	if (*sprite_wm > (int)display->max_wm)
4747 		*sprite_wm = display->max_wm;
4748 
4749 	return true;
4750 }
4751 
4752 static bool
sandybridge_compute_sprite_srwm(struct drm_device * dev,int plane,uint32_t sprite_width,int pixel_size,const struct intel_watermark_params * display,int latency_ns,int * sprite_wm)4753 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4754 				uint32_t sprite_width, int pixel_size,
4755 				const struct intel_watermark_params *display,
4756 				int latency_ns, int *sprite_wm)
4757 {
4758 	struct drm_crtc *crtc;
4759 	unsigned long line_time_us;
4760 	int clock;
4761 	int line_count, line_size;
4762 	int small, large;
4763 	int entries;
4764 
4765 	if (!latency_ns) {
4766 		*sprite_wm = 0;
4767 		return false;
4768 	}
4769 
4770 	crtc = intel_get_crtc_for_plane(dev, plane);
4771 	clock = crtc->mode.clock;
4772 	if (!clock) {
4773 		*sprite_wm = 0;
4774 		return false;
4775 	}
4776 
4777 	line_time_us = (sprite_width * 1000) / clock;
4778 	if (!line_time_us) {
4779 		*sprite_wm = 0;
4780 		return false;
4781 	}
4782 
4783 	line_count = (latency_ns / line_time_us + 1000) / 1000;
4784 	line_size = sprite_width * pixel_size;
4785 
4786 	/* Use the minimum of the small and large buffer method for primary */
4787 	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4788 	large = line_count * line_size;
4789 
4790 	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4791 	*sprite_wm = entries + display->guard_size;
4792 
4793 	return *sprite_wm > 0x3ff ? false : true;
4794 }
4795 
/*
 * sandybridge_update_sprite_wm - program sprite watermarks for one pipe
 *
 * Computes and writes the WM0 sprite watermark for @pipe, then the LP1
 * self-refresh sprite watermark, and on Ivybridge the LP2/LP3 levels as
 * well.  If any computation fails, later levels are left untouched.
 */
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	/* Select the WM0 register for this pipe. */
	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	/* Update only the sprite field of WM0, preserving plane/cursor. */
	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);


	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
4874 
4875 /**
4876  * intel_update_watermarks - update FIFO watermark values based on current modes
4877  *
4878  * Calculate watermark values for the various WM regs based on current mode
4879  * and plane configuration.
4880  *
4881  * There are several cases to deal with here:
4882  *   - normal (i.e. non-self-refresh)
4883  *   - self-refresh (SR) mode
4884  *   - lines are large relative to FIFO size (buffer can hold up to 2)
4885  *   - lines are small relative to FIFO size (buffer can hold more than 2
4886  *     lines), so need to account for TLB latency
4887  *
4888  *   The normal calculation is:
4889  *     watermark = dotclock * bytes per pixel * latency
4890  *   where latency is platform & configuration dependent (we assume pessimal
4891  *   values here).
4892  *
4893  *   The SR calculation is:
4894  *     watermark = (trunc(latency/line time)+1) * surface width *
4895  *       bytes per pixel
4896  *   where
4897  *     line time = htotal / dotclock
4898  *     surface width = hdisplay for normal plane and 64 for cursor
4899  *   and latency is assumed to be high, as above.
4900  *
4901  * The final value programmed to the register should always be rounded up,
4902  * and include an extra 2 entries to account for clock crossings.
4903  *
4904  * We don't use the sprite, so we can ignore that.  And on Crestline we have
4905  * to set the non-SR watermarks to 8.
4906  */
intel_update_watermarks(struct drm_device * dev)4907 static void intel_update_watermarks(struct drm_device *dev)
4908 {
4909 	struct drm_i915_private *dev_priv = dev->dev_private;
4910 
4911 	if (dev_priv->display.update_wm)
4912 		dev_priv->display.update_wm(dev);
4913 }
4914 
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
				    uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Not every platform provides a sprite watermark hook. */
	if (!dev_priv->display.update_sprite_wm)
		return;

	dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
					   pixel_size);
}
4924 
intel_panel_use_ssc(struct drm_i915_private * dev_priv)4925 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4926 {
4927 	if (i915_panel_use_ssc >= 0)
4928 		return i915_panel_use_ssc != 0;
4929 	return dev_priv->lvds_use_ssc
4930 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4931 }
4932 
4933 /**
4934  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4935  * @crtc: CRTC structure
4936  * @mode: requested mode
4937  *
4938  * A pipe may be connected to one or more outputs.  Based on the depth of the
4939  * attached framebuffer, choose a good color depth to use on the pipe.
4940  *
4941  * If possible, match the pipe depth to the fb depth.  In some cases, this
4942  * isn't ideal, because the connected output supports a lesser or restricted
4943  * set of depths.  Resolve that here:
4944  *    LVDS typically supports only 6bpc, so clamp down in that case
4945  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4946  *    Displays may support a restricted set as well, check EDID and clamp as
4947  *      appropriate.
4948  *    DP may want to dither down to 6bpc to fit larger modes
4949  *
4950  * RETURNS:
4951  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4952  * true if they don't match).
4953  */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp,
					 struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* A3 power state distinguishes 24-bit from 18-bit panels. */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc && edp_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 *
		 * NOTE(review): a display_bpc of exactly 12 falls into the
		 * else branch and is forced down to 8 — confirm that losing
		 * 12bpc HDMI here is intended.
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/* DP link bandwidth may require dithering down to 6bpc. */
	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
		display_bpc = 6;
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	/* Map framebuffer depth to the minimum pipe bpc expressing it. */
	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	display_bpc = min(display_bpc, bpc);

	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
		      bpc, display_bpc);

	/* Three channels per pixel. */
	*pipe_bpp = display_bpc * 3;

	/* Dithering is needed whenever the pipe runs deeper than the sink. */
	return display_bpc != bpc;
}
5071 
i9xx_get_refclk(struct drm_crtc * crtc,int num_connectors)5072 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5073 {
5074 	struct drm_device *dev = crtc->dev;
5075 	struct drm_i915_private *dev_priv = dev->dev_private;
5076 	int refclk;
5077 
5078 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5079 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5080 		refclk = dev_priv->lvds_ssc_freq * 1000;
5081 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5082 			      refclk / 1000);
5083 	} else if (!IS_GEN2(dev)) {
5084 		refclk = 96000;
5085 	} else {
5086 		refclk = 48000;
5087 	}
5088 
5089 	return refclk;
5090 }
5091 
static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
				      intel_clock_t *clock)
{
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting. */
	int freq = adjusted_mode->clock;

	if (freq >= 100000 && freq < 140500) {
		clock->p1 = 2;
		clock->p2 = 10;
		clock->n = 3;
		clock->m1 = 16;
		clock->m2 = 8;
		return;
	}

	if (freq >= 140500 && freq <= 200000) {
		clock->p1 = 1;
		clock->p2 = 10;
		clock->n = 6;
		clock->m1 = 12;
		clock->m2 = 8;
	}
}
5113 
/*
 * Program the FP0/FP1 divisor registers for the pipe from the computed
 * dividers.  FP1 gets the reduced (downclocked) dividers only for LVDS
 * with powersave enabled, which also marks the low-frequency clock as
 * available for later CxSR downclocking; otherwise FP1 mirrors FP0.
 */
static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
				     intel_clock_t *clock,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 fp, fp2 = 0;

	/* Pineview encodes N as a one-hot bit rather than a plain value. */
	if (IS_PINEVIEW(dev)) {
		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = (1 << reduced_clock->n) << 16 |
				reduced_clock->m1 << 8 | reduced_clock->m2;
	} else {
		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
				reduced_clock->m2;
	}

	I915_WRITE(FP0(pipe), fp);

	intel_crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915_powersave) {
		I915_WRITE(FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
	} else {
		I915_WRITE(FP1(pipe), fp);
	}
}
5147 
/*
 * i9xx_crtc_mode_set - full mode set for gen2/3/4 (non-PCH) pipes
 *
 * Computes PLL dividers for the requested mode, programs the DPLL, LVDS
 * port, pipe timings, pipe source size and plane registers, then enables
 * the pipe and plane and sets the framebuffer base.  The register writes
 * below follow a hardware-mandated order (DPLL off -> LVDS pins -> DPLL
 * on -> stabilize -> timings); do not reorder casually.
 *
 * Returns 0 on success or a negative errno (e.g. -EINVAL when no PLL
 * settings exist for the mode).
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, dspcntr, pipeconf, vsyncshift;
	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	u32 temp;
	u32 lvds_sync = 0;

	/* Classify the encoder types attached to this CRTC. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		*/
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}

	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);

	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
				 &reduced_clock : NULL);

	/* Build the DPLL control value from the divider and output type. */
	dpll = DPLL_VGA_MODE_DIS;

	if (!IS_GEN2(dev)) {
		if (is_lvds)
			dpll |= DPLLB_MODE_LVDS;
		else
			dpll |= DPLLB_MODE_DAC_SERIAL;
		if (is_sdvo) {
			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (pixel_multiplier > 1) {
				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
			}
			dpll |= DPLL_DVO_HIGH_SPEED;
		}
		if (is_dp)
			dpll |= DPLL_DVO_HIGH_SPEED;

		/* compute bitmask from p1 value */
		if (IS_PINEVIEW(dev))
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		else {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (IS_G4X(dev) && has_reduced_clock)
				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
		}
		switch (clock.p2) {
		case 5:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
			break;
		case 7:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
			break;
		case 10:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
			break;
		case 14:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
			break;
		}
		if (INTEL_INFO(dev)->gen >= 4)
			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
	} else {
		/* Gen2 has a simpler P1/P2 encoding. */
		if (is_lvds) {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		} else {
			if (clock.p1 == 2)
				dpll |= PLL_P1_DIVIDE_BY_TWO;
			else
				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (clock.p2 == 4)
				dpll |= PLL_P2_DIVIDE_BY_4;
		}
	}

	/* Choose the PLL reference input. */
	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
	if (is_dp) {
		if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
			pipeconf |= PIPECONF_BPP_6 |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;
		}
	}

	dpll |= DPLL_VCO_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* Write the DPLL with the VCO still disabled first. */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			temp |= LVDS_PIPEB_SELECT;
		} else {
			temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponsding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		/* set the dithering flag on LVDS as needed */
		if (INTEL_INFO(dev)->gen >= 4) {
			if (dev_priv->lvds_dither)
				temp |= LVDS_ENABLE_DITHER;
			else
				temp &= ~LVDS_ENABLE_DITHER;
		}
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(LVDS, temp);
	}

	if (is_dp) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	}

	/* Now enable the VCO. */
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+ carries the SDVO pixel multiplier in DPLL_MD. */
		temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (!IS_GEN2(dev) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		vsyncshift = adjusted_mode->crtc_hsync_start
			     - adjusted_mode->crtc_htotal/2;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		vsyncshift = 0;
	}

	if (!IS_GEN3(dev))
		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);

	/* Program the pipe timing registers (all values are zero-based). */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));
	intel_enable_pipe(dev_priv, pipe, false);

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));
	intel_enable_plane(dev_priv, plane, pipe);

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
5505 
5506 /*
5507  * Initialize reference clocks when the driver loads
5508  */
/*
 * ironlake_init_pch_refclk - configure the PCH display reference clock
 *
 * Scans all encoders to decide whether a panel (LVDS or eDP) is present
 * and whether spread-spectrum clocking can/should be used, then programs
 * PCH_DREF_CONTROL accordingly.  Each write is followed by a posting read
 * and a 200us delay to let the clock tree settle before the next step.
 */
void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 temp;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (intel_encoder_is_pch_edp(&encoder->base))
				has_pch_edp = true;
			else
				has_cpu_edp = true;
			break;
		}
	}

	/* On IBX an external CK505 may supply the non-spread clock. */
	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		temp |= DREF_NONSPREAD_CK505_ENABLE;
	else
		temp |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			temp |= DREF_SSC1_ENABLE;
		} else
			temp &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		temp &= ~ DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
5622 
ironlake_get_refclk(struct drm_crtc * crtc)5623 static int ironlake_get_refclk(struct drm_crtc *crtc)
5624 {
5625 	struct drm_device *dev = crtc->dev;
5626 	struct drm_i915_private *dev_priv = dev->dev_private;
5627 	struct intel_encoder *encoder;
5628 	struct drm_mode_config *mode_config = &dev->mode_config;
5629 	struct intel_encoder *edp_encoder = NULL;
5630 	int num_connectors = 0;
5631 	bool is_lvds = false;
5632 
5633 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5634 		if (encoder->base.crtc != crtc)
5635 			continue;
5636 
5637 		switch (encoder->type) {
5638 		case INTEL_OUTPUT_LVDS:
5639 			is_lvds = true;
5640 			break;
5641 		case INTEL_OUTPUT_EDP:
5642 			edp_encoder = encoder;
5643 			break;
5644 		}
5645 		num_connectors++;
5646 	}
5647 
5648 	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5649 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5650 			      dev_priv->lvds_ssc_freq);
5651 		return dev_priv->lvds_ssc_freq * 1000;
5652 	}
5653 
5654 	return 120000;
5655 }
5656 
/*
 * ironlake_crtc_mode_set - program a full mode set on a PCH (Ironlake+) pipe
 * @crtc: CRTC being configured
 * @mode: user-requested mode (drives pipe source size and DP M/N)
 * @adjusted_mode: mode after encoder fixup; drives the actual pipe timings
 * @x: framebuffer scan-out x origin
 * @y: framebuffer scan-out y origin
 * @old_fb: previous framebuffer, handed to intel_pipe_set_base()
 *
 * Computes PLL dividers, FDI lane count and link M/N values, then writes
 * the DPLL, LVDS, pipe timing, and display plane registers in the order
 * the hardware requires.  Returns 0 on success or a negative errno
 * (-EINVAL when no PLL divisors fit the requested clock).
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y,
				  struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct intel_encoder *has_edp_encoder = NULL;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	struct fdi_m_n m_n = {0};
	u32 temp;
	u32 lvds_sync = 0;
	int target_clock, pixel_multiplier, lane, link_bw, factor;
	unsigned int pipe_bpp;
	bool dither;

	/* Classify the output types attached to this CRTC; several of the
	 * PLL/FDI decisions below depend on the combination found here.
	 */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_edp_encoder = encoder;
			break;
		}

		num_connectors++;
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		*/
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting. */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* FDI link */
	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	lane = 0;
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		target_clock = mode->clock;
		intel_edp_link_config(has_edp_encoder,
				      &lane, &link_bw);
	} else {
		/* [e]DP over FDI requires target mode clock
		   instead of link clock */
		/* NOTE(review): &has_edp_encoder->base is computed here even
		 * when has_edp_encoder is NULL; this relies on
		 * intel_encoder_is_pch_edp() tolerating that — confirm.
		 */
		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
			target_clock = mode->clock;
		else
			target_clock = adjusted_mode->clock;

		/* FDI is a binary signal running at ~2.7GHz, encoding
		 * each output octet as 10 bits. The actual frequency
		 * is stored as a divider into a 100MHz clock, and the
		 * mode pixel clock is stored in units of 1KHz.
		 * Hence the bw of each lane in terms of the mode signal
		 * is:
		 */
		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
	}

	/* determine panel color depth */
	temp = I915_READ(PIPECONF(pipe));
	temp &= ~PIPE_BPC_MASK;
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
	switch (pipe_bpp) {
	case 18:
		temp |= PIPE_6BPC;
		break;
	case 24:
		temp |= PIPE_8BPC;
		break;
	case 30:
		temp |= PIPE_10BPC;
		break;
	case 36:
		temp |= PIPE_12BPC;
		break;
	default:
		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
			pipe_bpp);
		temp |= PIPE_8BPC;
		pipe_bpp = 24;
		break;
	}

	intel_crtc->bpp = pipe_bpp;
	I915_WRITE(PIPECONF(pipe), temp);

	if (!lane) {
		/*
		 * Account for spread spectrum to avoid
		 * oversubscribing the link. Max center spread
		 * is 2.5%; use 5% for safety's sake.
		 */
		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
		lane = bps / (link_bw * 8) + 1;
	}

	intel_crtc->fdi_lanes = lane;

	if (pixel_multiplier > 1)
		link_bw *= pixel_multiplier;
	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
			     &m_n);

	/* Pack the divisors into the FP0/FP1 register layout. */
	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
	if (has_reduced_clock)
		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
			reduced_clock.m2;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->lvds_ssc_freq == 100) ||
		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
			factor = 25;
	} else if (is_sdvo && is_tv)
		factor = 20;

	if (clock.m < factor * clock.n)
		fp |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	/* NOTE(review): same NULL-tolerant &has_edp_encoder->base pattern as
	 * above — confirm intel_encoder_is_pch_edp() guards a NULL encoder.
	 */
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
	drm_mode_debug_printmodeline(mode);

	/* PCH eDP needs FDI, but CPU eDP does not */
	if (!intel_crtc->no_pll) {
		if (!has_edp_encoder ||
		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			/* Prime the PLL with its divisors but keep VCO off;
			 * it is enabled further down once LVDS pins are up.
			 */
			I915_WRITE(PCH_FP0(pipe), fp);
			I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

			POSTING_READ(PCH_DPLL(pipe));
			udelay(150);
		}
	} else {
		/* Pipe C has no PLL of its own: it may only reuse a PLL that
		 * is already programmed with exactly matching settings.
		 */
		if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
		    fp == I915_READ(PCH_FP0(0))) {
			intel_crtc->use_pll_a = true;
			DRM_DEBUG_KMS("using pipe a dpll\n");
		} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
			   fp == I915_READ(PCH_FP0(1))) {
			intel_crtc->use_pll_a = false;
			DRM_DEBUG_KMS("using pipe b dpll\n");
		} else {
			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
			return -EINVAL;
		}
	}

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(PCH_LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (HAS_PCH_CPT(dev)) {
			temp &= ~PORT_TRANS_SEL_MASK;
			temp |= PORT_TRANS_SEL_CPT(pipe);
		} else {
			if (pipe == 1)
				temp |= LVDS_PIPEB_SELECT;
			else
				temp &= ~LVDS_PIPEB_SELECT;
		}

		/* set the corresponsding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(PCH_LVDS, temp);
	}

	pipeconf &= ~PIPECONF_DITHER_EN;
	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
	if ((is_lvds && dev_priv->lvds_dither) || dither) {
		pipeconf |= PIPECONF_DITHER_EN;
		pipeconf |= PIPECONF_DITHER_TYPE_SP;
	}
	/* NOTE(review): &has_edp_encoder->base again evaluated when
	 * has_edp_encoder may be NULL — same caveat as above.
	 */
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	} else {
		/* For non-DP output, clear any trans DP clock recovery setting.*/
		I915_WRITE(TRANSDATA_M1(pipe), 0);
		I915_WRITE(TRANSDATA_N1(pipe), 0);
		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
	}

	if (!intel_crtc->no_pll &&
	    (!has_edp_encoder ||
	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
		I915_WRITE(PCH_DPLL(pipe), dpll);

		/* Wait for the clocks to stabilize. */
		POSTING_READ(PCH_DPLL(pipe));
		udelay(150);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(PCH_DPLL(pipe), dpll);
	}

	intel_crtc->lowfreq_avail = false;
	if (!intel_crtc->no_pll) {
		if (is_lvds && has_reduced_clock && i915_powersave) {
			/* Arm FP1 with the downclocked divisors so CxSR can
			 * switch to the reduced clock when idle.
			 */
			I915_WRITE(PCH_FP1(pipe), fp2);
			intel_crtc->lowfreq_avail = true;
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
			}
		} else {
			I915_WRITE(PCH_FP1(pipe), fp);
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
			}
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACED_ILK;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		I915_WRITE(VSYNCSHIFT(pipe),
			   adjusted_mode->crtc_hsync_start
			   - adjusted_mode->crtc_htotal/2);
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		I915_WRITE(VSYNCSHIFT(pipe), 0);
	}

	/* Program the pipe timing registers; each packs (active-1) in the low
	 * half and (total/end-1) in the high half.
	 */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		/* CPU eDP uses its own PLL, not the PCH DPLL. */
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
	}

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
6104 
intel_crtc_mode_set(struct drm_crtc * crtc,struct drm_display_mode * mode,struct drm_display_mode * adjusted_mode,int x,int y,struct drm_framebuffer * old_fb)6105 static int intel_crtc_mode_set(struct drm_crtc *crtc,
6106 			       struct drm_display_mode *mode,
6107 			       struct drm_display_mode *adjusted_mode,
6108 			       int x, int y,
6109 			       struct drm_framebuffer *old_fb)
6110 {
6111 	struct drm_device *dev = crtc->dev;
6112 	struct drm_i915_private *dev_priv = dev->dev_private;
6113 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6114 	int pipe = intel_crtc->pipe;
6115 	int ret;
6116 
6117 	drm_vblank_pre_modeset(dev, pipe);
6118 
6119 	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
6120 					      x, y, old_fb);
6121 	drm_vblank_post_modeset(dev, pipe);
6122 
6123 	if (ret)
6124 		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
6125 	else
6126 		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
6127 
6128 	return ret;
6129 }
6130 
intel_eld_uptodate(struct drm_connector * connector,int reg_eldv,uint32_t bits_eldv,int reg_elda,uint32_t bits_elda,int reg_edid)6131 static bool intel_eld_uptodate(struct drm_connector *connector,
6132 			       int reg_eldv, uint32_t bits_eldv,
6133 			       int reg_elda, uint32_t bits_elda,
6134 			       int reg_edid)
6135 {
6136 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6137 	uint8_t *eld = connector->eld;
6138 	uint32_t i;
6139 
6140 	i = I915_READ(reg_eldv);
6141 	i &= bits_eldv;
6142 
6143 	if (!eld[0])
6144 		return !i;
6145 
6146 	if (!i)
6147 		return false;
6148 
6149 	i = I915_READ(reg_elda);
6150 	i &= ~bits_elda;
6151 	I915_WRITE(reg_elda, i);
6152 
6153 	for (i = 0; i < eld[2]; i++)
6154 		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
6155 			return false;
6156 
6157 	return true;
6158 }
6159 
/*
 * g4x_write_eld - hand the connector's ELD to the G4x audio hardware
 * @connector: connector whose ELD should be programmed
 * @crtc: CRTC driving the connector (unused on G4x; single audio unit)
 *
 * Invalidates the current ELD, streams the new bytes into the hardware
 * buffer one dword at a time, then sets the ELD-valid bit so the HD
 * audio codec can fetch it.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	/* Pick the ELD-valid bit matching the detected audio device ID. */
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	/* Skip the rewrite if the hardware buffer already matches. */
	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the ELD and rewind the write address to zero. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	/* Nothing further to write when there is no ELD. */
	if (!eld[0])
		return;

	/* eld[2] is the ELD length in dwords; clamp to the HW buffer. */
	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the freshly written ELD valid. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
6199 
/*
 * ironlake_write_eld - program the per-pipe ELD buffer on PCH platforms
 * @connector: connector whose ELD should be programmed
 * @crtc: CRTC driving the connector; selects the audio register bank
 *
 * Resolves the IBX vs CPT register layout, decodes which digital port
 * the audio is routed to, then invalidates, rewrites, and revalidates
 * the hardware ELD buffer for that port.
 */
static void ironlake_write_eld(struct drm_connector *connector,
				     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;

	/* IBX and CPT PCHs place the audio registers at different offsets. */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_config = IBX_AUD_CONFIG_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_config = CPT_AUD_CONFIG_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Per-pipe register banks are 0x100 bytes apart. */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;
	aud_config += i * 0x100;

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
		/* One valid bit per port, 4 bits apart. */
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else
		I915_WRITE(aud_config, 0);

	/* Skip the rewrite if the hardware buffer already matches. */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the current ELD before rewriting it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Rewind the ELD write address to the start of the buffer. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the freshly written ELD valid. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
6278 
intel_write_eld(struct drm_encoder * encoder,struct drm_display_mode * mode)6279 void intel_write_eld(struct drm_encoder *encoder,
6280 		     struct drm_display_mode *mode)
6281 {
6282 	struct drm_crtc *crtc = encoder->crtc;
6283 	struct drm_connector *connector;
6284 	struct drm_device *dev = encoder->dev;
6285 	struct drm_i915_private *dev_priv = dev->dev_private;
6286 
6287 	connector = drm_select_eld(encoder, mode);
6288 	if (!connector)
6289 		return;
6290 
6291 	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6292 			 connector->base.id,
6293 			 drm_get_connector_name(connector),
6294 			 connector->encoder->base.id,
6295 			 drm_get_encoder_name(connector->encoder));
6296 
6297 	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6298 
6299 	if (dev_priv->display.write_eld)
6300 		dev_priv->display.write_eld(connector, crtc);
6301 }
6302 
6303 /** Loads the palette/gamma unit for the CRTC with the prepared values */
intel_crtc_load_lut(struct drm_crtc * crtc)6304 void intel_crtc_load_lut(struct drm_crtc *crtc)
6305 {
6306 	struct drm_device *dev = crtc->dev;
6307 	struct drm_i915_private *dev_priv = dev->dev_private;
6308 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6309 	int palreg = PALETTE(intel_crtc->pipe);
6310 	int i;
6311 
6312 	/* The clocks have to be on to load the palette. */
6313 	if (!crtc->enabled || !intel_crtc->active)
6314 		return;
6315 
6316 	/* use legacy palette for Ironlake */
6317 	if (HAS_PCH_SPLIT(dev))
6318 		palreg = LGC_PALETTE(intel_crtc->pipe);
6319 
6320 	for (i = 0; i < 256; i++) {
6321 		I915_WRITE(palreg + 4 * i,
6322 			   (intel_crtc->lut_r[i] << 16) |
6323 			   (intel_crtc->lut_g[i] << 8) |
6324 			   intel_crtc->lut_b[i]);
6325 	}
6326 }
6327 
/*
 * Show or hide the i845/i865 hardware cursor.  @base is the cursor
 * buffer address; zero means "hide".
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool show = (base != 0);
	u32 cntl;

	if (intel_crtc->cursor_visible == show)
		return;

	cntl = I915_READ(_CURACNTR);
	if (show) {
		/* On these chipsets the base can only be changed while the
		 * cursor is disabled, so program it before enabling.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else {
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	}
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = show;
}
6357 
/*
 * Update the i9xx hardware cursor for @crtc.  @base is the cursor
 * buffer address; zero hides the cursor.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool show = (base != 0);

	if (show != intel_crtc->cursor_visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));

		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = show;
	}
	/* and commit changes on next vblank */
	POSTING_READ(CURCNTR(pipe));
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));
}
6385 
/*
 * Update the Ivybridge hardware cursor for @crtc.  @base is the cursor
 * buffer address; zero hides the cursor.  IVB has no pipe-select bits —
 * each pipe has its own cursor registers.
 */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool show = (base != 0);

	if (show != intel_crtc->cursor_visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));

		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = show;
	}
	/* and commit changes on next vblank */
	POSTING_READ(CURCNTR_IVB(pipe));
	I915_WRITE(CURBASE_IVB(pipe), base);
	POSTING_READ(CURBASE_IVB(pipe));
}
6412 
/* If no part of the cursor is visible on the framebuffer, then the GPU may
 * hang, so force the cursor base to 0 (hidden) whenever it is fully
 * off-screen.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	/* Only show the cursor when the pipe is scanning out a fb. */
	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	/* Negative coordinates are encoded as sign bit + magnitude. */
	if (x < 0) {
		/* Entirely off the left edge: hide the cursor. */
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		/* Entirely off the top edge: hide the cursor. */
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	/* Nothing to do when it was hidden and stays hidden. */
	if (!visible && !intel_crtc->cursor_visible)
		return;

	if (IS_IVYBRIDGE(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}

	if (visible)
		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
6474 
/*
 * intel_crtc_cursor_set - bind a GEM object as the CRTC's hardware cursor
 * @crtc: CRTC to update
 * @file: owning DRM file, used for the GEM handle lookup
 * @handle: GEM handle of the cursor image, or 0 to hide the cursor
 * @width: cursor width in pixels; only 64 is supported
 * @height: cursor height in pixels; only 64 is supported
 *
 * Pins the buffer into the GTT (or attaches it as a phys object on
 * chipsets that need physical cursor addresses), releases any previous
 * cursor bo, and arms the new image via intel_crtc_update_cursor().
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
				 struct drm_file *file,
				 uint32_t handle,
				 uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	uint32_t addr;
	int ret;

	DRM_DEBUG_KMS("\n");

	/* if we want to turn off the cursor ignore width and height */
	if (!handle) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		obj = NULL;
		mutex_lock(&dev->struct_mutex);
		goto finish;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		DRM_ERROR("we currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL)
		return -ENOENT;

	/* The object must hold at least a full 32bpp cursor image. */
	if (obj->base.size < width * height * 4) {
		/* Fixed typo in the error message ("to small"). */
		DRM_ERROR("buffer is too small\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
	if (!dev_priv->info->cursor_needs_physical) {
		if (obj->tiling_mode) {
			DRM_ERROR("cursor cannot be tiled\n");
			ret = -EINVAL;
			goto fail_locked;
		}

		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
		if (ret) {
			DRM_ERROR("failed to move cursor bo into the GTT\n");
			goto fail_locked;
		}

		/* The cursor scanout is untiled; drop any fence. */
		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			DRM_ERROR("failed to release fence for cursor");
			goto fail_unpin;
		}

		addr = obj->gtt_offset;
	} else {
		/* Chipsets that scan the cursor out of physical memory. */
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_attach_phys_object(dev, obj,
						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
						  align);
		if (ret) {
			DRM_ERROR("failed to attach phys object\n");
			goto fail_locked;
		}
		addr = obj->phys_obj->handle->busaddr;
	}

	if (IS_GEN2(dev))
		I915_WRITE(CURSIZE, (height << 12) | width);

 finish:
	/* Release the previous cursor bo, if any. */
	if (intel_crtc->cursor_bo) {
		if (dev_priv->info->cursor_needs_physical) {
			if (intel_crtc->cursor_bo != obj)
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
		} else
			i915_gem_object_unpin(intel_crtc->cursor_bo);
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
	}

	mutex_unlock(&dev->struct_mutex);

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
	intel_crtc->cursor_width = width;
	intel_crtc->cursor_height = height;

	intel_crtc_update_cursor(crtc, true);

	return 0;
fail_unpin:
	i915_gem_object_unpin(obj);
fail_locked:
	mutex_unlock(&dev->struct_mutex);
fail:
	drm_gem_object_unreference_unlocked(&obj->base);
	return ret;
}
6579 
intel_crtc_cursor_move(struct drm_crtc * crtc,int x,int y)6580 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6581 {
6582 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6583 
6584 	intel_crtc->cursor_x = x;
6585 	intel_crtc->cursor_y = y;
6586 
6587 	intel_crtc_update_cursor(crtc, true);
6588 
6589 	return 0;
6590 }
6591 
6592 /** Sets the color ramps on behalf of RandR */
intel_crtc_fb_gamma_set(struct drm_crtc * crtc,u16 red,u16 green,u16 blue,int regno)6593 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6594 				 u16 blue, int regno)
6595 {
6596 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6597 
6598 	intel_crtc->lut_r[regno] = red >> 8;
6599 	intel_crtc->lut_g[regno] = green >> 8;
6600 	intel_crtc->lut_b[regno] = blue >> 8;
6601 }
6602 
/* Read back one gamma LUT slot, scaling the stored 8-bit value up to the
 * 16-bit range RandR expects. */
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);

	*red = icrtc->lut_r[regno] << 8;
	*green = icrtc->lut_g[regno] << 8;
	*blue = icrtc->lut_b[regno] << 8;
}
6612 
/* Replace LUT entries [start, start+size) (clamped to the 256-entry
 * table) from 16-bit ramps, then push the table to the pipe. */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);
	int i, end;

	end = start + size;
	if (end > 256)
		end = 256;

	for (i = start; i < end; i++) {
		icrtc->lut_r[i] = red[i] >> 8;
		icrtc->lut_g[i] = green[i] >> 8;
		icrtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
6627 
6628 /**
6629  * Get a pipe with a simple mode set on it for doing load-based monitor
6630  * detection.
6631  *
6632  * It will be up to the load-detect code to adjust the pipe as appropriate for
6633  * its requirements.  The pipe will be connected to no other encoders.
6634  *
6635  * Currently this code will only succeed if there is a pipe with no encoders
6636  * configured for it.  In the future, it could choose to temporarily disable
6637  * some outputs to free up a pipe for its use.
6638  *
6639  * \return crtc, or NULL if no pipes are available.
6640  */
6641 
/* VESA 640x480x72Hz mode to set on the pipe.
 * Used by intel_get_load_detect_pipe() as the fallback mode when the
 * caller does not supply one. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6647 
6648 static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device * dev,struct drm_mode_fb_cmd2 * mode_cmd,struct drm_i915_gem_object * obj)6649 intel_framebuffer_create(struct drm_device *dev,
6650 			 struct drm_mode_fb_cmd2 *mode_cmd,
6651 			 struct drm_i915_gem_object *obj)
6652 {
6653 	struct intel_framebuffer *intel_fb;
6654 	int ret;
6655 
6656 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6657 	if (!intel_fb) {
6658 		drm_gem_object_unreference_unlocked(&obj->base);
6659 		return ERR_PTR(-ENOMEM);
6660 	}
6661 
6662 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6663 	if (ret) {
6664 		drm_gem_object_unreference_unlocked(&obj->base);
6665 		kfree(intel_fb);
6666 		return ERR_PTR(ret);
6667 	}
6668 
6669 	return &intel_fb->base;
6670 }
6671 
6672 static u32
intel_framebuffer_pitch_for_width(int width,int bpp)6673 intel_framebuffer_pitch_for_width(int width, int bpp)
6674 {
6675 	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6676 	return ALIGN(pitch, 64);
6677 }
6678 
6679 static u32
intel_framebuffer_size_for_mode(struct drm_display_mode * mode,int bpp)6680 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6681 {
6682 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6683 	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6684 }
6685 
6686 static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device * dev,struct drm_display_mode * mode,int depth,int bpp)6687 intel_framebuffer_create_for_mode(struct drm_device *dev,
6688 				  struct drm_display_mode *mode,
6689 				  int depth, int bpp)
6690 {
6691 	struct drm_i915_gem_object *obj;
6692 	struct drm_mode_fb_cmd2 mode_cmd;
6693 
6694 	obj = i915_gem_alloc_object(dev,
6695 				    intel_framebuffer_size_for_mode(mode, bpp));
6696 	if (obj == NULL)
6697 		return ERR_PTR(-ENOMEM);
6698 
6699 	mode_cmd.width = mode->hdisplay;
6700 	mode_cmd.height = mode->vdisplay;
6701 	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6702 								bpp);
6703 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
6704 
6705 	return intel_framebuffer_create(dev, &mode_cmd, obj);
6706 }
6707 
6708 static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device * dev,struct drm_display_mode * mode)6709 mode_fits_in_fbdev(struct drm_device *dev,
6710 		   struct drm_display_mode *mode)
6711 {
6712 	struct drm_i915_private *dev_priv = dev->dev_private;
6713 	struct drm_i915_gem_object *obj;
6714 	struct drm_framebuffer *fb;
6715 
6716 	if (dev_priv->fbdev == NULL)
6717 		return NULL;
6718 
6719 	obj = dev_priv->fbdev->ifb.obj;
6720 	if (obj == NULL)
6721 		return NULL;
6722 
6723 	fb = &dev_priv->fbdev->ifb.base;
6724 	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6725 							       fb->bits_per_pixel))
6726 		return NULL;
6727 
6728 	if (obj->base.size < mode->vdisplay * fb->pitches[0])
6729 		return NULL;
6730 
6731 	return fb;
6732 }
6733 
/*
 * Acquire a pipe driving @connector for load-based detection.
 *
 * State needed to undo this (previous dpms mode, whether the pipe was
 * borrowed, any temporary fb) is recorded in @old for
 * intel_release_load_detect_pipe().  Returns true on success.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		/* Existing pipe reused: nothing temporary to tear down. */
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* Skip crtcs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	/* Borrowed pipe: release path must disconnect it again. */
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		/* Remember the temporary fb so the release path destroys it. */
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	/* Covers the ERR_PTR from intel_framebuffer_create_for_mode(). */
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
6849 
/*
 * Undo intel_get_load_detect_pipe(): disconnect a borrowed pipe and
 * destroy any temporary framebuffer, or restore the previous dpms mode
 * on a pipe that was merely woken up.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		/* Pipe was borrowed: detach the connector and let the DRM
		 * helper shut down anything now unused. */
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
6880 
/* Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the pipe's DPLL and FP registers back into an intel_clock_t
 * (m1/m2/n/p1/p2) and recomputes the dot clock, assuming the usual
 * reference clock for the platform.  Returns 0 if the DPLL mode bits
 * are unrecognised.
 */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* Pick whichever FP register the DPLL is currently sourcing. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview stores N as a one-hot bit; ffs()-1 recovers it. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is encoded one-hot on gen3+ as well. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
6967 
/** Returns the currently programmed mode of the given pipe.
 *
 * Reads the pipe timing registers back into a freshly kzalloc'd
 * drm_display_mode; the caller owns (and must free) the returned mode.
 * Returns NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	mode->clock = intel_crtc_clock_get(dev, crtc);
	/* Hardware stores each timing pair as (end << 16 | start), with
	 * both values off by one from the mode's terms. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
7000 
#define GPU_IDLE_TIMEOUT 500 /* ms */

/* When this timer fires, we've been idle for awhile.
 *
 * If the GPU still has active requests, re-arm; otherwise mark the
 * device idle and kick the idle worker to adjust clocks.
 */
static void intel_gpu_idle_timer(unsigned long arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!list_empty(&dev_priv->mm.active_list)) {
		/* Still processing requests, so just re-arm the timer. */
		mod_timer(&dev_priv->idle_timer, jiffies +
			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
		return;
	}

	dev_priv->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}
7019 
#define CRTC_IDLE_TIMEOUT 1000 /* ms */

/* Per-crtc idle timer: once the crtc's framebuffer is no longer being
 * written by the GPU, mark the crtc idle and schedule the idle worker
 * (which may downclock the pipe). */
static void intel_crtc_idle_timer(unsigned long arg)
{
	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
	struct drm_crtc *crtc = &intel_crtc->base;
	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
	struct intel_framebuffer *intel_fb;

	/* NOTE(review): the NULL check below only works if 'base' is the
	 * first member of intel_framebuffer (container_of offset 0) —
	 * presumably true here; verify against intel_drv.h. */
	intel_fb = to_intel_framebuffer(crtc->fb);
	if (intel_fb && intel_fb->obj->active) {
		/* The framebuffer is still being accessed by the GPU. */
		mod_timer(&intel_crtc->idle_timer, jiffies +
			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
		return;
	}

	intel_crtc->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}
7040 
/* Switch an LVDS pipe back to its full (non-downclocked) pixel clock
 * and schedule the idle timer that may downclock it again later.
 * No-op on PCH-split platforms or when no downclock mode is available. */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* The DPLL rate-select bit is only writable when the
		 * panel power sequencer registers are unlocked. */
		assert_panel_unlocked(dev_priv, pipe);

		/* Clearing FPA1 selects the FP0 (full-rate) divisors. */
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Read back to confirm the switch actually took. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}

	/* Schedule downclock */
	mod_timer(&intel_crtc->idle_timer, jiffies +
		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
7075 
/* Switch an idle LVDS pipe to its downclocked pixel clock (FP1
 * divisors) to save power.  Called from the idle worker; no-op on
 * PCH-split platforms or when no downclock mode is available. */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		u32 dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* Rate-select bit requires the panel registers unlocked. */
		assert_panel_unlocked(dev_priv, pipe);

		/* Setting FPA1 selects the FP1 (downclocked) divisors. */
		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		/* Read back to confirm the switch actually took. */
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}
}
7110 
7111 /**
7112  * intel_idle_update - adjust clocks for idleness
7113  * @work: work struct
7114  *
7115  * Either the GPU or display (or both) went idle.  Check the busy status
7116  * here and adjust the CRTC and GPU clocks as necessary.
7117  */
intel_idle_update(struct work_struct * work)7118 static void intel_idle_update(struct work_struct *work)
7119 {
7120 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
7121 						    idle_work);
7122 	struct drm_device *dev = dev_priv->dev;
7123 	struct drm_crtc *crtc;
7124 	struct intel_crtc *intel_crtc;
7125 
7126 	if (!i915_powersave)
7127 		return;
7128 
7129 	mutex_lock(&dev->struct_mutex);
7130 
7131 	i915_update_gfx_val(dev_priv);
7132 
7133 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7134 		/* Skip inactive CRTCs */
7135 		if (!crtc->fb)
7136 			continue;
7137 
7138 		intel_crtc = to_intel_crtc(crtc);
7139 		if (!intel_crtc->busy)
7140 			intel_decrease_pllclock(crtc);
7141 	}
7142 
7143 
7144 	mutex_unlock(&dev->struct_mutex);
7145 }
7146 
7147 /**
7148  * intel_mark_busy - mark the GPU and possibly the display busy
7149  * @dev: drm device
7150  * @obj: object we're operating on
7151  *
7152  * Callers can use this function to indicate that the GPU is busy processing
7153  * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
7154  * buffer), we'll also mark the display as busy, so we know to increase its
7155  * clock frequency.
7156  */
intel_mark_busy(struct drm_device * dev,struct drm_i915_gem_object * obj)7157 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
7158 {
7159 	drm_i915_private_t *dev_priv = dev->dev_private;
7160 	struct drm_crtc *crtc = NULL;
7161 	struct intel_framebuffer *intel_fb;
7162 	struct intel_crtc *intel_crtc;
7163 
7164 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
7165 		return;
7166 
7167 	if (!dev_priv->busy)
7168 		dev_priv->busy = true;
7169 	else
7170 		mod_timer(&dev_priv->idle_timer, jiffies +
7171 			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
7172 
7173 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7174 		if (!crtc->fb)
7175 			continue;
7176 
7177 		intel_crtc = to_intel_crtc(crtc);
7178 		intel_fb = to_intel_framebuffer(crtc->fb);
7179 		if (intel_fb->obj == obj) {
7180 			if (!intel_crtc->busy) {
7181 				/* Non-busy -> busy, upclock */
7182 				intel_increase_pllclock(crtc);
7183 				intel_crtc->busy = true;
7184 			} else {
7185 				/* Busy -> busy, put off timer */
7186 				mod_timer(&intel_crtc->idle_timer, jiffies +
7187 					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
7188 			}
7189 		}
7190 	}
7191 }
7192 
/* Tear down a crtc: cancel and free any pending unpin work (detached
 * under the event lock so the flip-completion irq cannot race with us),
 * then release the DRM core state and the intel_crtc itself. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		/* Wait for a possibly-running worker before freeing. */
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
7214 
/* Deferred completion of a pageflip: unpin the old framebuffer object,
 * drop the references taken when the flip was queued, and give FBC a
 * chance to re-enable.  Runs from the workqueue with struct_mutex. */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);

	mutex_lock(&work->dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(work->dev);
	mutex_unlock(&work->dev->struct_mutex);
	kfree(work);
}
7229 
/* Complete a pending pageflip on @crtc from the flip-done interrupt:
 * deliver the vblank event (with timestamp fixup if the irq beat the
 * vblank bookkeeping), drop the vblank reference, clear the old fb's
 * pending-flip bit and hand the unpin work to the workqueue. */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;
	struct drm_pending_vblank_event *e;
	struct timeval tnow, tvbl;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	do_gettimeofday(&tnow);

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	/* Pairs with the smp_wmb()s in intel_mark_page_flip_active(). */
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* and that the unpin work is consistent wrt ->pending. */
	smp_rmb();

	intel_crtc->unpin_work = NULL;

	if (work->event) {
		e = work->event;
		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);

		/* Called before vblank count and timestamps have
		 * been updated for the vblank interval of flip
		 * completion? Need to increment vblank count and
		 * add one videorefresh duration to returned timestamp
		 * to account for this. We assume this happened if we
		 * get called over 0.9 frame durations after the last
		 * timestamped vblank.
		 *
		 * This calculation can not be used with vrefresh rates
		 * below 5Hz (10Hz to be on the safe side) without
		 * promoting to 64 integers.
		 */
		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
		    9 * crtc->framedur_ns) {
			e->event.sequence++;
			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
					     crtc->framedur_ns);
		}

		e->event.tv_sec = tvbl.tv_sec;
		e->event.tv_usec = tvbl.tv_usec;

		/* Queue the event and wake any poll()er on the drm fd. */
		list_add_tail(&e->base.link,
			      &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	/* Balances the drm_vblank_get() taken when the flip was queued. */
	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	obj = work->old_fb_obj;

	/* The old fb is no longer awaiting a flip on this plane. */
	atomic_clear_mask(1 << intel_crtc->plane,
			  &obj->pending_flip.counter);

	wake_up(&dev_priv->pending_flip_queue);
	schedule_work(&work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
7308 
intel_finish_page_flip(struct drm_device * dev,int pipe)7309 void intel_finish_page_flip(struct drm_device *dev, int pipe)
7310 {
7311 	drm_i915_private_t *dev_priv = dev->dev_private;
7312 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
7313 
7314 	do_intel_finish_page_flip(dev, crtc);
7315 }
7316 
intel_finish_page_flip_plane(struct drm_device * dev,int plane)7317 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7318 {
7319 	drm_i915_private_t *dev_priv = dev->dev_private;
7320 	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
7321 
7322 	do_intel_finish_page_flip(dev, crtc);
7323 }
7324 
/* Advance a queued flip from PENDING to COMPLETE when the flip-pending
 * interrupt fires.  atomic_inc_not_zero() deliberately ignores work
 * items that were never activated (see the NB below). */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	/* NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work)
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
7341 
intel_mark_page_flip_active(struct intel_crtc * intel_crtc)7342 inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
7343 {
7344 	/* Ensure that the work item is consistent when activating it ... */
7345 	smp_wmb();
7346 	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
7347 	/* and that it is marked active as soon as the irq could fire. */
7348 	smp_wmb();
7349 }
7350 
/* Queue a pageflip on gen2 via the legacy ring: pin+fence the new fb,
 * emit MI_WAIT_FOR_EVENT to serialise against the previous flip on
 * this plane, then MI_DISPLAY_FLIP with the new base address.
 * Returns 0 on success or a negative errno (fb unpinned on failure). */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto err;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

	ret = BEGIN_LP_RING(6);
	if (ret)
		goto err_unpin;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0]);
	OUT_RING(obj->gtt_offset + offset);
	OUT_RING(0); /* aux display base address, unused */

	/* Must be marked active before the commands can complete. */
	intel_mark_page_flip_active(intel_crtc);
	ADVANCE_LP_RING();
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
7397 
/* Gen3 variant of the legacy-ring pageflip: identical to gen2 except
 * for the MI_DISPLAY_FLIP_I915 opcode and a trailing MI_NOOP instead
 * of the aux base-address dword. */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto err;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

	ret = BEGIN_LP_RING(6);
	if (ret)
		goto err_unpin;

	/* Serialise against a still-pending flip on this plane. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_DISPLAY_FLIP_I915 |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0]);
	OUT_RING(obj->gtt_offset + offset);
	OUT_RING(MI_NOOP);

	intel_mark_page_flip_active(intel_crtc);
	ADVANCE_LP_RING();
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
7441 
/* Gen4 (i965+) pageflip: the display registers keep their own x/y
 * offsets, so only the base address (with the tiling bit) needs to be
 * emitted — no crtc offset and no wait-for-flip event. */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto err;

	ret = BEGIN_LP_RING(4);
	if (ret)
		goto err_unpin;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0]);
	OUT_RING(obj->gtt_offset | obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	OUT_RING(pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	ADVANCE_LP_RING();
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
7486 
/* Gen6 pageflip: like gen4 but the tiling bit is carried in the pitch
 * dword rather than the base-address dword. */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto err;

	ret = BEGIN_LP_RING(4);
	if (ret)
		goto err_unpin;

	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0] | obj->tiling_mode);
	OUT_RING(obj->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	OUT_RING(pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	ADVANCE_LP_RING();
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
7529 
/*
 * On gen7 we currently use the blit ring because (in early silicon at least)
 * the render ring doesn't give us interrupts for page flip completion, which
 * means clients will hang after the first flip is queued.  Fortunately the
 * blit ring generates interrupts properly, so use it instead.
 *
 * Returns 0 on success or a negative errno; on failure the object has
 * been unpinned again.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
	uint32_t plane_bit = 0;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	/* Ivybridge uses its own plane-select encoding in the flip command. */
	switch(intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		ret = -ENODEV;
		goto err_unpin;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, (obj->gtt_offset));
	intel_ring_emit(ring, (MI_NOOP));

	/* Mark the flip pending before the command reaches hardware to
	 * avoid racing the completion interrupt. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
7585 
/*
 * Fallback flip hook for chipsets without a page-flip implementation:
 * unconditionally reject the request with -ENODEV.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj)
{
	return -ENODEV;
}
7593 
/*
 * intel_crtc_page_flip - queue an asynchronous flip to a new framebuffer
 *
 * Implements drm_crtc_funcs.page_flip.  Allocates an intel_unpin_work
 * record describing the flip, takes a vblank reference plus GEM references
 * on the old and new framebuffer objects, then hands the flip to the
 * per-generation queue_flip hook.  The flip-completion path later runs
 * intel_unpin_work_fn to send @event and drop the references.
 *
 * Only one flip may be pending per CRTC at a time (-EBUSY otherwise).
 * Returns 0 on success or a negative errno with all state rolled back.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->fb;
	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->dev = crtc->dev;
	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	/* Hold a vblank reference until the flip completes. */
	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(work);
		drm_vblank_put(dev, intel_crtc->pipe);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	mutex_lock(&dev->struct_mutex);

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	/* Block clients from rendering to the new back buffer until
	 * the flip occurs and the object is no longer visible.
	 */
	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
	if (ret)
		goto cleanup_pending;

	/* NOTE(review): FBC is turned off while a flip is in flight —
	 * presumably it interacts badly with flips; confirm before moving. */
	intel_disable_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

	/* Error unwind: undo everything above in reverse order. */
cleanup_pending:
	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
	crtc->fb = old_fb;
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	spin_lock_irqsave(&dev->event_lock, flags);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_vblank_put(dev, intel_crtc->pipe);
free_work:
	kfree(work);

	return ret;
}
7678 
/*
 * intel_sanitize_modesetting - undo conflicting BIOS pipe<->plane routing
 *
 * Clears debug frame-start delays on every pipe; on non-PCH platforms it
 * additionally makes sure @plane is not left scanning out from the other
 * pipe than the one we are about to use, disabling plane and pipe if so,
 * so the later mode teardown happens in a safe order.
 */
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;
	int i;

	/* Clear any frame start delays used for debugging left by the BIOS */
	for_each_pipe(i) {
		reg = PIPECONF(i);
		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	/* NOTE(review): the !! collapses the selected pipe to 0/1, so this
	 * comparison assumes only two pipes exist on the non-PCH platforms
	 * that reach this code — confirm if a third pipe is ever added. */
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
7721 
intel_crtc_reset(struct drm_crtc * crtc)7722 static void intel_crtc_reset(struct drm_crtc *crtc)
7723 {
7724 	struct drm_device *dev = crtc->dev;
7725 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7726 
7727 	/* Reset flags back to the 'unknown' status so that they
7728 	 * will be correctly set on the initial modeset.
7729 	 */
7730 	intel_crtc->dpms_mode = -1;
7731 
7732 	/* We need to fix up any BIOS configuration that conflicts with
7733 	 * our expectations.
7734 	 */
7735 	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
7736 }
7737 
/* Modeset helper vtable shared by all CRTCs.  Deliberately NOT const:
 * .prepare and .commit are filled in per-platform by intel_crtc_init(). */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};
7747 
/* CRTC entry points invoked by the DRM core (ioctl paths). */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
7757 
/*
 * intel_crtc_init - allocate and register the CRTC for @pipe
 *
 * Allocates the intel_crtc (with trailing storage for INTELFB_CONN_LIMIT
 * connector pointers), registers it with the DRM core, initialises an
 * identity gamma LUT, records the pipe<->plane mapping and hooks up the
 * generation-specific prepare/commit helpers.  On allocation failure the
 * pipe is simply left without a CRTC.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Start with an identity (linear) gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* Each plane slot may be claimed exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	/* NOTE(review): intel_helper_funcs is shared mutable state; every
	 * CRTC on a device rewrites .prepare/.commit with the same values,
	 * which is harmless but is why that struct is not const. */
	if (HAS_PCH_SPLIT(dev)) {
		if (pipe == 2 && IS_IVYBRIDGE(dev))
			intel_crtc->no_pll = true;
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	/* Timer presumably drives the busy/idle clock management (see
	 * intel_crtc->busy above) — confirm against intel_crtc_idle_timer. */
	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
		    (unsigned long)intel_crtc);
}
7811 
intel_get_pipe_from_crtc_id(struct drm_device * dev,void * data,struct drm_file * file)7812 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
7813 				struct drm_file *file)
7814 {
7815 	drm_i915_private_t *dev_priv = dev->dev_private;
7816 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7817 	struct drm_mode_object *drmmode_obj;
7818 	struct intel_crtc *crtc;
7819 
7820 	if (!dev_priv) {
7821 		DRM_ERROR("called with no initialization\n");
7822 		return -EINVAL;
7823 	}
7824 
7825 	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7826 			DRM_MODE_OBJECT_CRTC);
7827 
7828 	if (!drmmode_obj) {
7829 		DRM_ERROR("no such CRTC id\n");
7830 		return -EINVAL;
7831 	}
7832 
7833 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
7834 	pipe_from_crtc_id->pipe = crtc->pipe;
7835 
7836 	return 0;
7837 }
7838 
intel_encoder_clones(struct drm_device * dev,int type_mask)7839 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
7840 {
7841 	struct intel_encoder *encoder;
7842 	int index_mask = 0;
7843 	int entry = 0;
7844 
7845 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7846 		if (type_mask & encoder->clone_mask)
7847 			index_mask |= (1 << entry);
7848 		entry++;
7849 	}
7850 
7851 	return index_mask;
7852 }
7853 
has_edp_a(struct drm_device * dev)7854 static bool has_edp_a(struct drm_device *dev)
7855 {
7856 	struct drm_i915_private *dev_priv = dev->dev_private;
7857 
7858 	if (!IS_MOBILE(dev))
7859 		return false;
7860 
7861 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
7862 		return false;
7863 
7864 	if (IS_GEN5(dev) &&
7865 	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
7866 		return false;
7867 
7868 	return true;
7869 }
7870 
/*
 * intel_setup_outputs - probe and register every display output
 *
 * Order matters: LVDS/eDP first (internal panels), then CRT, then the
 * digital ports.  Detection is based on the hardware strap/detect bits;
 * SDVO ports are probed before falling back to HDMI/DP on the same pins.
 * Finishes by filling in the possible_crtcs/possible_clones masks and
 * disabling everything until the first real modeset.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds;

	has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		/* Port D may carry the internal panel instead of DP. */
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		/* NOTE(review): intentionally re-reads SDVOB here — pre-G4X
		 * SDVOC shares SDVOB's detect bit (see comment above). */
		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	if (HAS_PCH_SPLIT(dev))
		ironlake_init_pch_refclk(dev);
}
7979 
intel_user_framebuffer_destroy(struct drm_framebuffer * fb)7980 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
7981 {
7982 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7983 
7984 	drm_framebuffer_cleanup(fb);
7985 	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
7986 
7987 	kfree(intel_fb);
7988 }
7989 
intel_user_framebuffer_create_handle(struct drm_framebuffer * fb,struct drm_file * file,unsigned int * handle)7990 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
7991 						struct drm_file *file,
7992 						unsigned int *handle)
7993 {
7994 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7995 	struct drm_i915_gem_object *obj = intel_fb->obj;
7996 
7997 	return drm_gem_handle_create(file, &obj->base, handle);
7998 }
7999 
/* Vtable attached to every intel_framebuffer by intel_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
8004 
intel_framebuffer_init(struct drm_device * dev,struct intel_framebuffer * intel_fb,struct drm_mode_fb_cmd2 * mode_cmd,struct drm_i915_gem_object * obj)8005 int intel_framebuffer_init(struct drm_device *dev,
8006 			   struct intel_framebuffer *intel_fb,
8007 			   struct drm_mode_fb_cmd2 *mode_cmd,
8008 			   struct drm_i915_gem_object *obj)
8009 {
8010 	int ret;
8011 
8012 	if (obj->tiling_mode == I915_TILING_Y)
8013 		return -EINVAL;
8014 
8015 	if (mode_cmd->pitches[0] & 63)
8016 		return -EINVAL;
8017 
8018 	switch (mode_cmd->pixel_format) {
8019 	case DRM_FORMAT_RGB332:
8020 	case DRM_FORMAT_RGB565:
8021 	case DRM_FORMAT_XRGB8888:
8022 	case DRM_FORMAT_XBGR8888:
8023 	case DRM_FORMAT_ARGB8888:
8024 	case DRM_FORMAT_XRGB2101010:
8025 	case DRM_FORMAT_ARGB2101010:
8026 		/* RGB formats are common across chipsets */
8027 		break;
8028 	case DRM_FORMAT_YUYV:
8029 	case DRM_FORMAT_UYVY:
8030 	case DRM_FORMAT_YVYU:
8031 	case DRM_FORMAT_VYUY:
8032 		break;
8033 	default:
8034 		DRM_DEBUG_KMS("unsupported pixel format %u\n",
8035 				mode_cmd->pixel_format);
8036 		return -EINVAL;
8037 	}
8038 
8039 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
8040 	if (ret) {
8041 		DRM_ERROR("framebuffer init failed %d\n", ret);
8042 		return ret;
8043 	}
8044 
8045 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
8046 	intel_fb->obj = obj;
8047 	return 0;
8048 }
8049 
8050 static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device * dev,struct drm_file * filp,struct drm_mode_fb_cmd2 * mode_cmd)8051 intel_user_framebuffer_create(struct drm_device *dev,
8052 			      struct drm_file *filp,
8053 			      struct drm_mode_fb_cmd2 *mode_cmd)
8054 {
8055 	struct drm_i915_gem_object *obj;
8056 
8057 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
8058 						mode_cmd->handles[0]));
8059 	if (&obj->base == NULL)
8060 		return ERR_PTR(-ENOENT);
8061 
8062 	return intel_framebuffer_create(dev, mode_cmd, obj);
8063 }
8064 
/* Device-level mode config hooks registered with the DRM core. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fb_output_poll_changed,
};
8069 
8070 static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device * dev)8071 intel_alloc_context_page(struct drm_device *dev)
8072 {
8073 	struct drm_i915_gem_object *ctx;
8074 	int ret;
8075 
8076 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
8077 
8078 	ctx = i915_gem_alloc_object(dev, 4096);
8079 	if (!ctx) {
8080 		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
8081 		return NULL;
8082 	}
8083 
8084 	ret = i915_gem_object_pin(ctx, 4096, true);
8085 	if (ret) {
8086 		DRM_ERROR("failed to pin power context: %d\n", ret);
8087 		goto err_unref;
8088 	}
8089 
8090 	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
8091 	if (ret) {
8092 		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
8093 		goto err_unpin;
8094 	}
8095 
8096 	return ctx;
8097 
8098 err_unpin:
8099 	i915_gem_object_unpin(ctx);
8100 err_unref:
8101 	drm_gem_object_unreference(&ctx->base);
8102 	mutex_unlock(&dev->struct_mutex);
8103 	return NULL;
8104 }
8105 
/*
 * ironlake_set_drps - request a new render frequency point
 * @dev: drm device
 * @val: frequency index (between fmin and fmax from ironlake_enable_drps)
 *
 * Returns false without doing anything if the previous MEMSWCTL command
 * is still pending, true once the new request has been issued.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Build a change-frequency command for the requested point. */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	/* Setting the status bit presumably kicks off the command; the
	 * hardware clears it on completion (see the busy test above). */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
8127 
/*
 * ironlake_enable_drps - enable dynamic render P-state switching
 *
 * Programs the RC evaluation intervals and thresholds, decodes the
 * fmin/fmax/fstart operating points from MEMMODECTL, enables software
 * frequency-switch mode and moves the GPU to its start frequency.  Also
 * snapshots the counters used later by the IPS power accounting code.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* Presumably the voltage matching the start frequency, looked up
	 * in the PXVFREQ table — confirm against the PRM. */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	/* The *_delay fields hold frequency indices for ironlake_set_drps. */
	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Snapshot the raw counter registers (0x112e0..0x112f4) used by
	 * the IPS accounting; no symbolic names exist for these — TODO:
	 * confirm their meaning against the PRM. */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->last_time2);
}
8191 
/*
 * ironlake_disable_drps - disable DRPS frequency switching
 *
 * Acks and masks the PCU/EFC interrupts, returns the render clock to the
 * start frequency recorded by ironlake_enable_drps() and cancels the
 * outstanding MEMSWCTL command.
 */
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);

}
8212 
/*
 * gen6_set_rps - request a new GPU frequency on Sandybridge
 * @dev: drm device
 * @val: frequency target in hardware units (presumably multiples of
 *	 50 MHz, matching the pcode decoding in gen6_enable_rps)
 *
 * Writes the software frequency request field of GEN6_RPNSWREQ.
 *
 * Fix: "(val & 0x3ff) << 25" promoted to signed int, so for val >= 64
 * the shift overflowed INT_MAX — undefined behaviour.  The shift is now
 * performed in unsigned arithmetic; the register value is unchanged for
 * all inputs a two's-complement build happened to produce.
 */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 swreq;

	/* Frequency request lives in bits 31:25 of RPNSWREQ. */
	swreq = (u32)(val & 0x3ff) << 25;
	I915_WRITE(GEN6_RPNSWREQ, swreq);
}
8221 
/*
 * gen6_disable_rps - tear down RPS (render P-state) interrupt handling
 *
 * Parks the frequency request, masks and disables all PM interrupts and
 * clears latched interrupt state so a later gen6_enable_rps() starts
 * from a clean slate.
 */
void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Bit 31 presumably parks the software frequency request —
	 * confirm against the PRM. */
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps_lock);

	/* Ack anything still latched by writing the value back. */
	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
8240 
/*
 * Decode a PXVFREQ register value into a frequency in kHz:
 * freq = 133333 * divider / (2^post_div * pre_div).
 * Returns 0 when the pre-divider field is zero (entry unused/invalid).
 */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	int divider = (vidfreq >> 16) & 0x3f;
	int post_div = (vidfreq >> 12) & 0x3;
	int pre_div = vidfreq & 0x7;

	if (pre_div == 0)
		return 0;

	return (divider * 133333UL) / ((1UL << post_div) * pre_div);
}
8255 
/*
 * intel_init_emon - program the energy monitor (PMON) unit
 *
 * Loads experimentally-derived energy weights for the various GPU events
 * and per-P-state weights computed from the PXVFREQ table, then enables
 * PMON.  The LCFUSE correction factor is cached in dev_priv->corr for
 * the power-estimation code.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* weight ~ vid^2 * freq, normalised to fit a byte */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte-weights into four 32-bit PXW registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
8326 
intel_enable_rc6(struct drm_device * dev)8327 static int intel_enable_rc6(struct drm_device *dev)
8328 {
8329 	/*
8330 	 * Respect the kernel parameter if it is set
8331 	 */
8332 	if (i915_enable_rc6 >= 0)
8333 		return i915_enable_rc6;
8334 
8335 	/*
8336 	 * Disable RC6 on Ironlake
8337 	 */
8338 	if (INTEL_INFO(dev)->gen == 5)
8339 		return 0;
8340 
8341 	/*
8342 	 * Disable rc6 on Sandybridge
8343 	 */
8344 	if (INTEL_INFO(dev)->gen == 6) {
8345 		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
8346 		return INTEL_RC6_ENABLE;
8347 	}
8348 	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
8349 	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8350 }
8351 
/*
 * gen6_enable_rps - enable RC6 sleep states and RPS turbo on Sandybridge+
 *
 * Runs the enabling sequence in strict order: clear stale GT-fifo
 * errors, program the RC6 wake limits and thresholds, pick the RC6 mode
 * (via intel_enable_rc6()), program the RPS up/down thresholds, exchange
 * frequency limits with the PCU over the pcode mailbox and finally
 * unmask the PM interrupts that drive frequency changes.
 *
 * Takes struct_mutex and a forcewake reference for the duration.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	u32 gtfifodbg;
	int cur_freq, min_freq, max_freq;
	int rc6_mode;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Translate the chosen RC6 mode into control-register bits. */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	if (rc6_mode & INTEL_RC6p_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

	if (rc6_mode & INTEL_RC6pp_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;

	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Hand the min-frequency table request to the PCU via the pcode
	 * mailbox; each exchange waits for the READY bit to clear. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	spin_lock_irq(&dev_priv->rps_lock);
	WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
8495 
/*
 * Program the PCU's ring (uncore) frequency table.  For every GPU
 * frequency step between max_delay and min_delay we tell the PCU which
 * IA (CPU) frequency to use as a reference when deriving the ring
 * frequency, so memory accesses stay fast across the GPU range.
 * Caller context: takes struct_mutex internally.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;	/* GPU freq (50MHz units) below which we clamp */
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;	/* empirical IA-vs-GPU scaling slope */

	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		/* Mailbox expects the IA ratio in 100MHz units, rounded. */
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		/* One table entry per PCODE mailbox transaction: load DATA,
		 * kick MAILBOX with READY set, wait for READY to clear. */
		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
8548 
/*
 * Ironlake (gen5) clock gating setup: disables gating on units required
 * for FBC and CxSR, programs the chicken bits the spec requires for
 * memory self-refresh, clears the LP watermarks, and applies the
 * Ironlake-M specific FBC enabling bits.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	/* Zero the LP (low-power) watermark levels. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	/* Masked-bit style write: high half unmasks the low-half bit. */
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
8617 
/*
 * Sandybridge (gen6) clock gating setup: applies the chicken bits for
 * memory self-refresh and FBC, the MSAA HiZ workaround, and the
 * RCPB/RCC unit clock gating disables needed for render correctness.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* Zero the LP (low-power) watermark levels. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on every plane (bit14 of the two
	 * DSPCNTR registers mentioned in the spec note above). */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	/* The default value should be 0x200 according to docs, but the two
	 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
	I915_WRITE(GEN6_GT_MODE, 0xffff << 16);
	/* NOTE(review): this looks like a masked-bit register (high half
	 * selects which low bits get written) — confirm against the
	 * register spec; the second write then sets GEN6_GT_MODE_HI. */
	I915_WRITE(GEN6_GT_MODE, GEN6_GT_MODE_HI << 16 | GEN6_GT_MODE_HI);
}
8688 
gen7_setup_fixed_func_scheduler(struct drm_i915_private * dev_priv)8689 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
8690 {
8691 	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
8692 
8693 	reg &= ~GEN7_FF_SCHED_MASK;
8694 	reg |= GEN7_FF_TS_SCHED_HW;
8695 	reg |= GEN7_FF_VS_SCHED_HW;
8696 	reg |= GEN7_FF_DS_SCHED_HW;
8697 
8698 	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
8699 }
8700 
/*
 * Ivy Bridge (gen7) clock gating setup: applies the named workarounds
 * (WaDisableRCZUnitClockGating, WaDisableRHWOOptimizationForRenderHang,
 * WaApplyL3ControlAndL3ChickenMode, WaCatErrorRejectionIssue), disables
 * trickle feed on all planes, and switches the fixed-function units to
 * HW scheduling.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Zero the LP (low-power) watermark levels. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
			GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
			GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* Disable trickle feed on every display plane. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	gen7_setup_fixed_func_scheduler(dev_priv);
}
8748 
g4x_init_clock_gating(struct drm_device * dev)8749 static void g4x_init_clock_gating(struct drm_device *dev)
8750 {
8751 	struct drm_i915_private *dev_priv = dev->dev_private;
8752 	uint32_t dspclk_gate;
8753 
8754 	I915_WRITE(RENCLK_GATE_D1, 0);
8755 	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
8756 		   GS_UNIT_CLOCK_GATE_DISABLE |
8757 		   CL_UNIT_CLOCK_GATE_DISABLE);
8758 	I915_WRITE(RAMCLK_GATE_D, 0);
8759 	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
8760 		OVRUNIT_CLOCK_GATE_DISABLE |
8761 		OVCUNIT_CLOCK_GATE_DISABLE;
8762 	if (IS_GM45(dev))
8763 		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
8764 	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
8765 }
8766 
/*
 * Crestline (965GM) clock gating setup: keep RCC clock gating disabled
 * and zero the remaining render/display/RAM gating controls and DEUC.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	/* DEUC is a 16-bit register, hence the 16-bit write. */
	I915_WRITE16(DEUC, 0);
}
8777 
/*
 * Broadwater (965G) clock gating setup: disable clock gating on the
 * RCZ/RCC/RCPB/ISC/FBC render units and clear RENCLK_GATE_D2.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
8789 
gen3_init_clock_gating(struct drm_device * dev)8790 static void gen3_init_clock_gating(struct drm_device *dev)
8791 {
8792 	struct drm_i915_private *dev_priv = dev->dev_private;
8793 	u32 dstate = I915_READ(D_STATE);
8794 
8795 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
8796 		DSTATE_DOT_CLOCK_GATING;
8797 	I915_WRITE(D_STATE, dstate);
8798 }
8799 
/* i85x clock gating setup: disable clock gating for the SV unit. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
8806 
/* i830 clock gating setup: disable clock gating for the overlay unit. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
8813 
/* Ibex Peak PCH clock gating setup. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
8825 
/* Cougar Point PCH clock gating setup. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
8843 
/*
 * Release the RC6 context pages: unpin and drop the GEM reference on the
 * render context and power context objects, if allocated.  Safe to call
 * when either is already NULL.  Caller must hold struct_mutex (the GEM
 * unpin/unreference paths require it).
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
8860 
/*
 * Disable RC6 on Ironlake: if the power context was armed (PWRCTXA
 * non-zero), force the GPU out of RC6, clear the power context pointer,
 * restore RSTDBYCTL, and free the context pages.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		/* Poll (up to 50ms) for the render standby status to report ON. */
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
8880 
ironlake_setup_rc6(struct drm_device * dev)8881 static int ironlake_setup_rc6(struct drm_device *dev)
8882 {
8883 	struct drm_i915_private *dev_priv = dev->dev_private;
8884 
8885 	if (dev_priv->renderctx == NULL)
8886 		dev_priv->renderctx = intel_alloc_context_page(dev);
8887 	if (!dev_priv->renderctx)
8888 		return -ENOMEM;
8889 
8890 	if (dev_priv->pwrctx == NULL)
8891 		dev_priv->pwrctx = intel_alloc_context_page(dev);
8892 	if (!dev_priv->pwrctx) {
8893 		ironlake_teardown_rc6(dev);
8894 		return -ENOMEM;
8895 	}
8896 
8897 	return 0;
8898 }
8899 
ironlake_enable_rc6(struct drm_device * dev)8900 void ironlake_enable_rc6(struct drm_device *dev)
8901 {
8902 	struct drm_i915_private *dev_priv = dev->dev_private;
8903 	int ret;
8904 
8905 	/* rc6 disabled by default due to repeated reports of hanging during
8906 	 * boot and resume.
8907 	 */
8908 	if (!intel_enable_rc6(dev))
8909 		return;
8910 
8911 	mutex_lock(&dev->struct_mutex);
8912 	ret = ironlake_setup_rc6(dev);
8913 	if (ret) {
8914 		mutex_unlock(&dev->struct_mutex);
8915 		return;
8916 	}
8917 
8918 	/*
8919 	 * GPU can automatically power down the render unit if given a page
8920 	 * to save state.
8921 	 */
8922 	ret = BEGIN_LP_RING(6);
8923 	if (ret) {
8924 		ironlake_teardown_rc6(dev);
8925 		mutex_unlock(&dev->struct_mutex);
8926 		return;
8927 	}
8928 
8929 	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
8930 	OUT_RING(MI_SET_CONTEXT);
8931 	OUT_RING(dev_priv->renderctx->gtt_offset |
8932 		 MI_MM_SPACE_GTT |
8933 		 MI_SAVE_EXT_STATE_EN |
8934 		 MI_RESTORE_EXT_STATE_EN |
8935 		 MI_RESTORE_INHIBIT);
8936 	OUT_RING(MI_SUSPEND_FLUSH);
8937 	OUT_RING(MI_NOOP);
8938 	OUT_RING(MI_FLUSH);
8939 	ADVANCE_LP_RING();
8940 
8941 	/*
8942 	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
8943 	 * does an implicit flush, combined with MI_FLUSH above, it should be
8944 	 * safe to assume that renderctx is valid
8945 	 */
8946 	ret = intel_wait_ring_idle(LP_RING(dev_priv));
8947 	if (ret) {
8948 		DRM_ERROR("failed to enable ironlake power power savings\n");
8949 		ironlake_teardown_rc6(dev);
8950 		mutex_unlock(&dev->struct_mutex);
8951 		return;
8952 	}
8953 
8954 	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
8955 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8956 	mutex_unlock(&dev->struct_mutex);
8957 }
8958 
/*
 * Apply the platform-specific clock gating setup selected in
 * intel_init_display(), plus the PCH-side setup when one is registered.
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	/* Only set for PCH platforms (Ibex Peak / Cougar Point). */
	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}
8968 
8969 /* Set up chip specific display functions */
/*
 * Set up chip specific display functions: fills in the dev_priv->display
 * vtable (DPMS, mode set, plane update, FBC, clock speed, forcewake,
 * watermarks, FDI training, clock gating, ELD audio, page flip) based on
 * hardware generation and PCH configuration.
 */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We always want a DPMS function */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.dpms = ironlake_crtc_dpms;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else {
		dev_priv->display.dpms = i9xx_crtc_dpms;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.update_plane = i9xx_update_plane;
	}

	/* Frame buffer compression hooks, per generation. */
	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* Returns the core display clock speed */
	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev)) {
			u32	ecobus;

			/* A small trick here - if the bios hasn't configured MT forcewake,
			 * and if the device is in RC6, then force_wake_mt_get will not wake
			 * the device and the ECOBUS read will return zero. Which will be
			 * (correctly) interpreted by the test below as MT forcewake being
			 * disabled.
			 */
			mutex_lock(&dev->struct_mutex);
			__gen6_gt_force_wake_mt_get(dev_priv);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			__gen6_gt_force_wake_mt_put(dev_priv);
			mutex_unlock(&dev->struct_mutex);

			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
				dev_priv->display.force_wake_get =
					__gen6_gt_force_wake_mt_get;
				dev_priv->display.force_wake_put =
					__gen6_gt_force_wake_mt_put;
			}
		}

		/* PCH-side clock gating hook, applied by intel_init_clock_gating(). */
		if (HAS_PCH_IBX(dev))
			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
		else if (HAS_PCH_CPT(dev))
			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

		if (IS_GEN5(dev)) {
			/* CxSR/watermarks need a valid memory self-refresh latency. */
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}
9170 
9171 /*
9172  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
9173  * resume, or other times.  This quirk makes sure that's the case for
9174  * affected systems.
9175  */
quirk_pipea_force(struct drm_device * dev)9176 static void quirk_pipea_force(struct drm_device *dev)
9177 {
9178 	struct drm_i915_private *dev_priv = dev->dev_private;
9179 
9180 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
9181 	DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
9182 }
9183 
9184 /*
9185  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
9186  */
quirk_ssc_force_disable(struct drm_device * dev)9187 static void quirk_ssc_force_disable(struct drm_device *dev)
9188 {
9189 	struct drm_i915_private *dev_priv = dev->dev_private;
9190 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
9191 }
9192 
9193 /*
9194  * Some machines (Dell XPS13) suffer broken backlight controls if
9195  * BLM_PCH_PWM_ENABLE is set.
9196  */
quirk_no_pcm_pwm_enable(struct drm_device * dev)9197 static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
9198 {
9199 	struct drm_i915_private *dev_priv = dev->dev_private;
9200 	dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
9201 	DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
9202 }
9203 
9204 /*
9205  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
9206  * brightness value
9207  */
quirk_invert_brightness(struct drm_device * dev)9208 static void quirk_invert_brightness(struct drm_device *dev)
9209 {
9210 	struct drm_i915_private *dev_priv = dev->dev_private;
9211 	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
9212 }
9213 
/*
 * A quirk keyed on PCI device and subsystem IDs; the hook is invoked for
 * matching devices at init time.  PCI_ANY_ID wildcards the subsystem
 * vendor/device fields (see intel_init_quirks()).
 */
struct intel_quirk {
	int device;		/* PCI device ID */
	int subsystem_vendor;	/* PCI subsystem vendor ID, or PCI_ANY_ID */
	int subsystem_device;	/* PCI subsystem device ID, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied on match */
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* applied on DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* terminated match list */
};
9226 
intel_dmi_reverse_brightness(const struct dmi_system_id * id)9227 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
9228 {
9229 	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
9230 	return 1;
9231 }
9232 
/* DMI-keyed quirk table: pairs a DMI match list with the hook to apply. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
9248 
/* PCI-ID keyed quirk table, matched against the device in intel_init_quirks(). */
struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
	/* ThinkPad X40 needs pipe A force quirk */
	/* NOTE(review): no table entry follows the X40 comment above —
	 * confirm whether the entry was dropped intentionally. */

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Dell XPS13 HD Sandy Bridge */
	{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
	/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
	{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};
9292 
intel_init_quirks(struct drm_device * dev)9293 static void intel_init_quirks(struct drm_device *dev)
9294 {
9295 	struct pci_dev *d = dev->pdev;
9296 	int i;
9297 
9298 	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
9299 		struct intel_quirk *q = &intel_quirks[i];
9300 
9301 		if (d->device == q->device &&
9302 		    (d->subsystem_vendor == q->subsystem_vendor ||
9303 		     q->subsystem_vendor == PCI_ANY_ID) &&
9304 		    (d->subsystem_device == q->subsystem_device ||
9305 		     q->subsystem_device == PCI_ANY_ID))
9306 			q->hook(dev);
9307 	}
9308 	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
9309 		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
9310 			intel_dmi_quirks[i].hook(dev);
9311 	}
9312 }
9313 
9314 /* Disable the VGA plane that we never use */
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* The VGA control register lives on the CPU side on PCH-split parts. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

	/* Hold the VGA arbiter's legacy I/O decode while poking the
	 * VGA sequencer register SR01 (bit 5 = screen off). */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(1, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);	/* let the screen-off settle before disabling the plane */

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
9336 
i915_redisable_vga(struct drm_device * dev)9337 void i915_redisable_vga(struct drm_device *dev)
9338 {
9339 	struct drm_i915_private *dev_priv = dev->dev_private;
9340 	u32 vga_reg;
9341 
9342 	if (HAS_PCH_SPLIT(dev))
9343 		vga_reg = CPU_VGACNTRL;
9344 	else
9345 		vga_reg = VGACNTRL;
9346 
9347 	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
9348 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
9349 		I915_WRITE(vga_reg, VGA_DISP_DISABLE);
9350 		POSTING_READ(vga_reg);
9351 	}
9352 }
9353 
intel_modeset_init(struct drm_device * dev)9354 void intel_modeset_init(struct drm_device *dev)
9355 {
9356 	struct drm_i915_private *dev_priv = dev->dev_private;
9357 	int i, ret;
9358 
9359 	drm_mode_config_init(dev);
9360 
9361 	dev->mode_config.min_width = 0;
9362 	dev->mode_config.min_height = 0;
9363 
9364 	dev->mode_config.preferred_depth = 24;
9365 	dev->mode_config.prefer_shadow = 1;
9366 
9367 	dev->mode_config.funcs = (void *)&intel_mode_funcs;
9368 
9369 	intel_init_quirks(dev);
9370 
9371 	intel_init_display(dev);
9372 
9373 	if (IS_GEN2(dev)) {
9374 		dev->mode_config.max_width = 2048;
9375 		dev->mode_config.max_height = 2048;
9376 	} else if (IS_GEN3(dev)) {
9377 		dev->mode_config.max_width = 4096;
9378 		dev->mode_config.max_height = 4096;
9379 	} else {
9380 		dev->mode_config.max_width = 8192;
9381 		dev->mode_config.max_height = 8192;
9382 	}
9383 	dev->mode_config.fb_base = dev->agp->base;
9384 
9385 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
9386 		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
9387 
9388 	for (i = 0; i < dev_priv->num_pipe; i++) {
9389 		intel_crtc_init(dev, i);
9390 		ret = intel_plane_init(dev, i);
9391 		if (ret)
9392 			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
9393 	}
9394 
9395 	/* Just disable it once at startup */
9396 	i915_disable_vga(dev);
9397 	intel_setup_outputs(dev);
9398 
9399 	intel_init_clock_gating(dev);
9400 
9401 	if (IS_IRONLAKE_M(dev)) {
9402 		ironlake_enable_drps(dev);
9403 		intel_init_emon(dev);
9404 	}
9405 
9406 	if (IS_GEN6(dev) || IS_GEN7(dev)) {
9407 		gen6_enable_rps(dev_priv);
9408 		gen6_update_ring_freq(dev_priv);
9409 	}
9410 
9411 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
9412 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
9413 		    (unsigned long)dev);
9414 }
9415 
/*
 * Modeset initialization that runs after GEM is up: rc6 on
 * Ironlake-M, plus the video overlay.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

	intel_setup_overlay(dev);
}
9423 
intel_modeset_cleanup(struct drm_device * dev)9424 void intel_modeset_cleanup(struct drm_device *dev)
9425 {
9426 	struct drm_i915_private *dev_priv = dev->dev_private;
9427 	struct drm_crtc *crtc;
9428 	struct intel_crtc *intel_crtc;
9429 
9430 	drm_kms_helper_poll_fini(dev);
9431 	mutex_lock(&dev->struct_mutex);
9432 
9433 	intel_unregister_dsm_handler();
9434 
9435 
9436 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9437 		/* Skip inactive CRTCs */
9438 		if (!crtc->fb)
9439 			continue;
9440 
9441 		intel_crtc = to_intel_crtc(crtc);
9442 		intel_increase_pllclock(crtc);
9443 	}
9444 
9445 	intel_disable_fbc(dev);
9446 
9447 	if (IS_IRONLAKE_M(dev))
9448 		ironlake_disable_drps(dev);
9449 	if (IS_GEN6(dev) || IS_GEN7(dev))
9450 		gen6_disable_rps(dev);
9451 
9452 	if (IS_IRONLAKE_M(dev))
9453 		ironlake_disable_rc6(dev);
9454 
9455 	mutex_unlock(&dev->struct_mutex);
9456 
9457 	/* Disable the irq before mode object teardown, for the irq might
9458 	 * enqueue unpin/hotplug work. */
9459 	drm_irq_uninstall(dev);
9460 	cancel_work_sync(&dev_priv->hotplug_work);
9461 	cancel_work_sync(&dev_priv->rps_work);
9462 
9463 	/* flush any delayed tasks or pending work */
9464 	flush_scheduled_work();
9465 
9466 	/* Shut off idle work before the crtcs get freed. */
9467 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9468 		intel_crtc = to_intel_crtc(crtc);
9469 		del_timer_sync(&intel_crtc->idle_timer);
9470 	}
9471 	del_timer_sync(&dev_priv->idle_timer);
9472 	cancel_work_sync(&dev_priv->idle_work);
9473 
9474 	/* destroy backlight, if any, before the connectors */
9475 	intel_panel_destroy_backlight(dev);
9476 
9477 	drm_mode_config_cleanup(dev);
9478 }
9479 
9480 /*
9481  * Return which encoder is currently attached for connector.
9482  */
intel_best_encoder(struct drm_connector * connector)9483 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
9484 {
9485 	return &intel_attached_encoder(connector)->base;
9486 }
9487 
intel_connector_attach_encoder(struct intel_connector * connector,struct intel_encoder * encoder)9488 void intel_connector_attach_encoder(struct intel_connector *connector,
9489 				    struct intel_encoder *encoder)
9490 {
9491 	connector->encoder = encoder;
9492 	drm_mode_connector_attach_encoder(&connector->base,
9493 					  &encoder->base);
9494 }
9495 
9496 /*
9497  * set vga decode state - true == enable VGA decode
9498  */
/*
 * set vga decode state - true == enable VGA decode
 *
 * Toggles the VGA-disable bit in the bridge's GMCH control word.
 * Always returns 0.
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 gmch_ctrl;

	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl = state ? gmch_ctrl & ~INTEL_GMCH_VGA_DISABLE :
			    gmch_ctrl | INTEL_GMCH_VGA_DISABLE;
	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
	return 0;
}
9512 
9513 #ifdef CONFIG_DEBUG_FS
9514 #include <linux/seq_file.h>
9515 
/*
 * Snapshot of display-related registers, captured by
 * intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state(). Two entries per array, one
 * per pipe.
 */
struct intel_display_error_state {
	/* Cursor registers (CURCNTR/CURPOS/CURBASE). */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* NOTE(review): never filled by the capture code */
	} cursor[2];

	/* Pipe configuration and timing registers. */
	struct intel_pipe_error_state {
		u32 conf;
		u32 source;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];

	/* Primary plane registers (DSPCNTR/DSPSTRIDE/...). */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;		/* gen4+ only */
		u32 tile_offset;	/* gen4+ only */
	} plane[2];
};
9546 
9547 struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device * dev)9548 intel_display_capture_error_state(struct drm_device *dev)
9549 {
9550 	drm_i915_private_t *dev_priv = dev->dev_private;
9551 	struct intel_display_error_state *error;
9552 	int i;
9553 
9554 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
9555 	if (error == NULL)
9556 		return NULL;
9557 
9558 	for (i = 0; i < 2; i++) {
9559 		error->cursor[i].control = I915_READ(CURCNTR(i));
9560 		error->cursor[i].position = I915_READ(CURPOS(i));
9561 		error->cursor[i].base = I915_READ(CURBASE(i));
9562 
9563 		error->plane[i].control = I915_READ(DSPCNTR(i));
9564 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
9565 		error->plane[i].size = I915_READ(DSPSIZE(i));
9566 		error->plane[i].pos = I915_READ(DSPPOS(i));
9567 		error->plane[i].addr = I915_READ(DSPADDR(i));
9568 		if (INTEL_INFO(dev)->gen >= 4) {
9569 			error->plane[i].surface = I915_READ(DSPSURF(i));
9570 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
9571 		}
9572 
9573 		error->pipe[i].conf = I915_READ(PIPECONF(i));
9574 		error->pipe[i].source = I915_READ(PIPESRC(i));
9575 		error->pipe[i].htotal = I915_READ(HTOTAL(i));
9576 		error->pipe[i].hblank = I915_READ(HBLANK(i));
9577 		error->pipe[i].hsync = I915_READ(HSYNC(i));
9578 		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
9579 		error->pipe[i].vblank = I915_READ(VBLANK(i));
9580 		error->pipe[i].vsync = I915_READ(VSYNC(i));
9581 	}
9582 
9583 	return error;
9584 }
9585 
9586 void
intel_display_print_error_state(struct seq_file * m,struct drm_device * dev,struct intel_display_error_state * error)9587 intel_display_print_error_state(struct seq_file *m,
9588 				struct drm_device *dev,
9589 				struct intel_display_error_state *error)
9590 {
9591 	int i;
9592 
9593 	for (i = 0; i < 2; i++) {
9594 		seq_printf(m, "Pipe [%d]:\n", i);
9595 		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
9596 		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
9597 		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
9598 		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
9599 		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
9600 		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
9601 		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
9602 		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
9603 
9604 		seq_printf(m, "Plane [%d]:\n", i);
9605 		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
9606 		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
9607 		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
9608 		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
9609 		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
9610 		if (INTEL_INFO(dev)->gen >= 4) {
9611 			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
9612 			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
9613 		}
9614 
9615 		seq_printf(m, "Cursor [%d]:\n", i);
9616 		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
9617 		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
9618 		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
9619 	}
9620 }
9621 #endif
9622