/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_display_power.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_drrs.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_snps_phy.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
#include "display/intel_vrr.h"

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "gt/gen8_ppgtt.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_utils.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_dpt.h"
#include "intel_dsb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_modeset_verify.h"
#include "intel_modeset_setup.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @dev_priv: i915 device
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
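 *
 *   As a worked illustration (hypothetical numbers, not taken from any
 *   real platform): a 148.5 MHz dotclock at 4 bytes per pixel with an
 *   assumed 20 usec latency drains 148500000 * 4 * 0.00002 ~= 11880
 *   bytes during the latency window. For SR with htotal = 2200, the
 *   line time is 2200 / 148500000 ~= 14.8 usec, so the watermark must
 *   cover trunc(20 / 14.8) + 1 = 2 lines of the surface.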
 *
 *   The final value programmed to the register should always be rounded up,
 *   and include an extra 2 entries to account for clock crossings.
 *
 *   We don't use the sprite, so we can ignore that. And on Crestline we have
 *   to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
	if (dev_priv->display.funcs.wm->update_wm)
		dev_priv->display.funcs.wm->update_wm(dev_priv);
}

static int intel_compute_pipe_wm(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (dev_priv->display.funcs.wm->compute_pipe_wm)
		return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);

	return 0;
}

static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
		return 0;

	if (drm_WARN_ON(&dev_priv->drm,
			!dev_priv->display.funcs.wm->compute_pipe_wm))
		return 0;

	return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
}

static bool intel_initial_watermarks(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (dev_priv->display.funcs.wm->initial_watermarks) {
		dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
		return true;
	}

	return false;
}

static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (dev_priv->display.funcs.wm->atomic_update_watermarks)
		dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
}

static void intel_optimize_watermarks(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (dev_priv->display.funcs.wm->optimize_watermarks)
		dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
}

static int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (dev_priv->display.funcs.wm->compute_global_watermarks)
		return dev_priv->display.funcs.wm->compute_global_watermarks(state);

	return 0;
}

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

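	/*
	 * The CCK clock is derived as 2 * ref / (divider + 1); e.g. with
	 * a hypothetical 1.6 GHz HPLL VCO (ref_freq = 1600000 kHz) and a
	 * divider field of 9 this evaluates to 320000 kHz (320 MHz).
	 */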
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
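	/* ffs() is 1-based; the master is the lowest pipe in the bigjoiner mask */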
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}

u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_pipes)
		return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe != bigjoiner_master_pipe(crtc_state);
}

bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe == bigjoiner_master_pipe(crtc_state);
}

static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->bigjoiner_pipes);
}

struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;

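	/*
	 * Sample the pipe's current display scanline twice, 5 msec apart;
	 * if the value changed, the pipe is still scanning out.
	 */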
	line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), str_on_off(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
					    PIPECONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			str_on_off(state), str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts, PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 14)
		intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
	else if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

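		/*
		 * Each colour plane starts at the next aligned offset, so
		 * e.g. with plane_alignment = 64 and plane sizes 100 and
		 * 200 (hypothetical numbers) the second plane starts at
		 * ALIGN(100, 64) = 128, giving a total size of 328.
		 */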
		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}

bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
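 *
 * For example (hypothetical values): with cpp = 4 and a mapping stride
 * of 8192 bytes, (x, y) = (100, 50) maps to the linear offset
 * 50 * 8192 + 100 * 4 = 410000 bytes.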
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A happens to be disabled, use the
	 * first pipe from pipe_mask instead.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
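	/*
	 * Re-express the plane's main surface offset as x/y coordinates
	 * relative to the very start of the GTT mapping (offset 0) and
	 * return the resulting y offset.
	 */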
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static int
__intel_display_resume(struct drm_i915_private *i915,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(i915, ctx);
	intel_vga_redisable(i915);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(i915))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(&i915->drm, ret == -EDEADLK);

	return ret;
}

static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
		intel_has_gpu_reset(to_gt(dev_priv)));
}

void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(to_gt(dev_priv));
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending CRTCs failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}

void intel_display_finish_reset(struct drm_i915_private *i915)
{
	struct drm_modeset_acquire_ctx *ctx = &i915->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(i915))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
		return;

	state = fetch_and_zero(&i915->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(i915)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(i915, state, ctx);
		if (ret)
			drm_err(&i915->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(i915);
		intel_modeset_init_hw(i915);
		intel_init_clock_gating(i915);
		intel_hpd_init(i915);

		ret = __intel_display_resume(i915, state, ctx);
		if (ret)
			drm_err(&i915->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(i915);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&i915->drm.mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

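/*
 * Returns true if any CRTC still has an atomic commit whose cleanup (and
 * hence framebuffer unpinning) has not finished yet; waits one vblank on
 * the first such CRTC to give that cleanup a chance to complete.
 */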
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *master_crtc;
	int num_encoders = 0;
	int i;

	master_crtc = intel_master_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &master_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(encoder->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(master_crtc->pipe));

	return encoder;
}

static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

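	/*
	 * Read the scanline counter, wait, and check that it advanced;
	 * a value still stuck after two 5 msec polls means the pipe
	 * never (re)started after the mode set.
	 */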
	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
1186 * "Plane N strech max must be programmed to 11b (x1)
1187 * when Async flips are enabled on that plane."
1188 */
1189 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1190 SKL_PLANE1_STRETCH_MAX_MASK,
1191 enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
1192 } else {
1193 /* Also needed on HSW/BDW albeit undocumented */
1194 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1195 HSW_PRI_STRETCH_MAX_MASK,
1196 enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
1197 }
1198 }
1199
needs_async_flip_vtd_wa(const struct intel_crtc_state * crtc_state)1200 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1201 {
1202 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1203
1204 return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
1205 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
1206 }
1207
planes_enabling(const struct intel_crtc_state * old_crtc_state,const struct intel_crtc_state * new_crtc_state)1208 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
1209 const struct intel_crtc_state *new_crtc_state)
1210 {
1211 return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
1212 new_crtc_state->active_planes;
1213 }
1214
planes_disabling(const struct intel_crtc_state * old_crtc_state,const struct intel_crtc_state * new_crtc_state)1215 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
1216 const struct intel_crtc_state *new_crtc_state)
1217 {
1218 return old_crtc_state->active_planes &&
1219 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
1220 }
1221
intel_post_plane_update(struct intel_atomic_state * state,struct intel_crtc * crtc)1222 static void intel_post_plane_update(struct intel_atomic_state *state,
1223 struct intel_crtc *crtc)
1224 {
1225 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1226 const struct intel_crtc_state *old_crtc_state =
1227 intel_atomic_get_old_crtc_state(state, crtc);
1228 const struct intel_crtc_state *new_crtc_state =
1229 intel_atomic_get_new_crtc_state(state, crtc);
1230 enum pipe pipe = crtc->pipe;
1231
1232 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
1233
1234 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
1235 intel_update_watermarks(dev_priv);
1236
1237 hsw_ips_post_update(state, crtc);
1238 intel_fbc_post_update(state, crtc);
1239
1240 if (needs_async_flip_vtd_wa(old_crtc_state) &&
1241 !needs_async_flip_vtd_wa(new_crtc_state))
1242 intel_async_flip_vtd_wa(dev_priv, pipe, false);
1243
1244 if (needs_nv12_wa(old_crtc_state) &&
1245 !needs_nv12_wa(new_crtc_state))
1246 skl_wa_827(dev_priv, pipe, false);
1247
1248 if (needs_scalerclk_wa(old_crtc_state) &&
1249 !needs_scalerclk_wa(new_crtc_state))
1250 icl_wa_scalerclkgating(dev_priv, pipe, false);
1251
1252 if (needs_cursorclk_wa(old_crtc_state) &&
1253 !needs_cursorclk_wa(new_crtc_state))
1254 icl_wa_cursorclkgating(dev_priv, pipe, false);
1255
1256 intel_drrs_activate(new_crtc_state);
1257 }
1258
intel_crtc_enable_flip_done(struct intel_atomic_state * state,struct intel_crtc * crtc)1259 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1260 struct intel_crtc *crtc)
1261 {
1262 const struct intel_crtc_state *crtc_state =
1263 intel_atomic_get_new_crtc_state(state, crtc);
1264 u8 update_planes = crtc_state->update_planes;
1265 const struct intel_plane_state *plane_state;
1266 struct intel_plane *plane;
1267 int i;
1268
1269 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1270 if (plane->pipe == crtc->pipe &&
1271 update_planes & BIT(plane->id))
1272 plane->enable_flip_done(plane);
1273 }
1274 }
1275
intel_crtc_disable_flip_done(struct intel_atomic_state * state,struct intel_crtc * crtc)1276 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1277 struct intel_crtc *crtc)
1278 {
1279 const struct intel_crtc_state *crtc_state =
1280 intel_atomic_get_new_crtc_state(state, crtc);
1281 u8 update_planes = crtc_state->update_planes;
1282 const struct intel_plane_state *plane_state;
1283 struct intel_plane *plane;
1284 int i;
1285
1286 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1287 if (plane->pipe == crtc->pipe &&
1288 update_planes & BIT(plane->id))
1289 plane->disable_flip_done(plane);
1290 }
1291 }
1292
intel_crtc_async_flip_disable_wa(struct intel_atomic_state * state,struct intel_crtc * crtc)1293 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
1294 struct intel_crtc *crtc)
1295 {
1296 const struct intel_crtc_state *old_crtc_state =
1297 intel_atomic_get_old_crtc_state(state, crtc);
1298 const struct intel_crtc_state *new_crtc_state =
1299 intel_atomic_get_new_crtc_state(state, crtc);
1300 u8 update_planes = new_crtc_state->update_planes;
1301 const struct intel_plane_state *old_plane_state;
1302 struct intel_plane *plane;
1303 bool need_vbl_wait = false;
1304 int i;
1305
1306 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1307 if (plane->need_async_flip_disable_wa &&
1308 plane->pipe == crtc->pipe &&
1309 update_planes & BIT(plane->id)) {
1310 /*
1311 * Apart from the async flip bit we want to
1312 * preserve the old state for the plane.
1313 */
1314 plane->async_flip(plane, old_crtc_state,
1315 old_plane_state, false);
1316 need_vbl_wait = true;
1317 }
1318 }
1319
1320 if (need_vbl_wait)
1321 intel_crtc_wait_for_next_vblank(crtc);
1322 }
1323
intel_pre_plane_update(struct intel_atomic_state * state,struct intel_crtc * crtc)1324 static void intel_pre_plane_update(struct intel_atomic_state *state,
1325 struct intel_crtc *crtc)
1326 {
1327 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1328 const struct intel_crtc_state *old_crtc_state =
1329 intel_atomic_get_old_crtc_state(state, crtc);
1330 const struct intel_crtc_state *new_crtc_state =
1331 intel_atomic_get_new_crtc_state(state, crtc);
1332 enum pipe pipe = crtc->pipe;
1333
1334 intel_drrs_deactivate(old_crtc_state);
1335
1336 intel_psr_pre_plane_update(state, crtc);
1337
1338 if (hsw_ips_pre_update(state, crtc))
1339 intel_crtc_wait_for_next_vblank(crtc);
1340
1341 if (intel_fbc_pre_update(state, crtc))
1342 intel_crtc_wait_for_next_vblank(crtc);
1343
1344 if (!needs_async_flip_vtd_wa(old_crtc_state) &&
1345 needs_async_flip_vtd_wa(new_crtc_state))
1346 intel_async_flip_vtd_wa(dev_priv, pipe, true);
1347
1348 /* Display WA 827 */
1349 if (!needs_nv12_wa(old_crtc_state) &&
1350 needs_nv12_wa(new_crtc_state))
1351 skl_wa_827(dev_priv, pipe, true);
1352
1353 /* Wa_2006604312:icl,ehl */
1354 if (!needs_scalerclk_wa(old_crtc_state) &&
1355 needs_scalerclk_wa(new_crtc_state))
1356 icl_wa_scalerclkgating(dev_priv, pipe, true);
1357
1358 /* Wa_1604331009:icl,jsl,ehl */
1359 if (!needs_cursorclk_wa(old_crtc_state) &&
1360 needs_cursorclk_wa(new_crtc_state))
1361 icl_wa_cursorclkgating(dev_priv, pipe, true);
1362
1363 /*
1364 * Vblank time updates from the shadow to live plane control register
1365 * are blocked if the memory self-refresh mode is active at that
1366 * moment. So to make sure the plane gets truly disabled, disable
1367 * first the self-refresh mode. The self-refresh enable bit in turn
1368 * will be checked/applied by the HW only at the next frame start
1369 * event which is after the vblank start event, so we need to have a
1370 * wait-for-vblank between disabling the plane and the pipe.
1371 */
1372 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
1373 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
1374 intel_crtc_wait_for_next_vblank(crtc);
1375
1376 /*
1377 * IVB workaround: must disable low power watermarks for at least
1378 * one frame before enabling scaling. LP watermarks can be re-enabled
1379 * when scaling is disabled.
1380 *
1381 * WaCxSRDisabledForSpriteScaling:ivb
1382 */
1383 if (old_crtc_state->hw.active &&
1384 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
1385 intel_crtc_wait_for_next_vblank(crtc);
1386
1387 /*
1388 * If we're doing a modeset we don't need to do any
1389 * pre-vblank watermark programming here.
1390 */
1391 if (!intel_crtc_needs_modeset(new_crtc_state)) {
1392 /*
1393 * For platforms that support atomic watermarks, program the
1394 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
1395 * will be the intermediate values that are safe for both pre- and
1396 * post- vblank; when vblank happens, the 'active' values will be set
1397 * to the final 'target' values and we'll do this again to get the
1398 * optimal watermarks. For gen9+ platforms, the values we program here
1399 * will be the final target values which will get automatically latched
1400 * at vblank time; no further programming will be necessary.
1401 *
1402 * If a platform hasn't been transitioned to atomic watermarks yet,
1403 * we'll continue to update watermarks the old way, if flags tell
1404 * us to.
1405 */
1406 if (!intel_initial_watermarks(state, crtc))
1407 if (new_crtc_state->update_wm_pre)
1408 intel_update_watermarks(dev_priv);
1409 }
1410
1411 /*
1412 * Gen2 reports pipe underruns whenever all planes are disabled.
1413 * So disable underrun reporting before all the planes get disabled.
1414 *
1415 * We do this after .initial_watermarks() so that we have a
1416 * chance of catching underruns with the intermediate watermarks
1417 * vs. the old plane configuration.
1418 */
1419 if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
1420 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1421
1422 /*
1423 * WA for platforms where async address update enable bit
1424 * is double buffered and only latched at start of vblank.
1425 */
1426 if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
1427 intel_crtc_async_flip_disable_wa(state, crtc);
1428 }
1429
intel_crtc_disable_planes(struct intel_atomic_state * state,struct intel_crtc * crtc)1430 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
1431 struct intel_crtc *crtc)
1432 {
1433 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1434 const struct intel_crtc_state *new_crtc_state =
1435 intel_atomic_get_new_crtc_state(state, crtc);
1436 unsigned int update_mask = new_crtc_state->update_planes;
1437 const struct intel_plane_state *old_plane_state;
1438 struct intel_plane *plane;
1439 unsigned fb_bits = 0;
1440 int i;
1441
1442 intel_crtc_dpms_overlay_disable(crtc);
1443
1444 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1445 if (crtc->pipe != plane->pipe ||
1446 !(update_mask & BIT(plane->id)))
1447 continue;
1448
1449 intel_plane_disable_arm(plane, new_crtc_state);
1450
1451 if (old_plane_state->uapi.visible)
1452 fb_bits |= plane->frontbuffer_bit;
1453 }
1454
1455 intel_frontbuffer_flip(dev_priv, fb_bits);
1456 }
1457
1458 /*
1459 * intel_connector_primary_encoder - get the primary encoder for a connector
1460 * @connector: connector for which to return the encoder
1461 *
1462 * Returns the primary encoder for a connector. There is a 1:1 mapping from
1463 * all connectors to their encoder, except for DP-MST connectors which have
1464 * both a virtual and a primary encoder. These DP-MST primary encoders can be
1465 * pointed to by as many DP-MST connectors as there are pipes.
1466 */
1467 static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector * connector)1468 intel_connector_primary_encoder(struct intel_connector *connector)
1469 {
1470 struct intel_encoder *encoder;
1471
1472 if (connector->mst_port)
1473 return &dp_to_dig_port(connector->mst_port)->base;
1474
1475 encoder = intel_attached_encoder(connector);
1476 drm_WARN_ON(connector->base.dev, !encoder);
1477
1478 return encoder;
1479 }
1480
intel_encoders_update_prepare(struct intel_atomic_state * state)1481 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
1482 {
1483 struct drm_i915_private *i915 = to_i915(state->base.dev);
1484 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1485 struct intel_crtc *crtc;
1486 struct drm_connector_state *new_conn_state;
1487 struct drm_connector *connector;
1488 int i;
1489
1490 /*
1491 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
1492 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
1493 */
1494 if (i915->display.dpll.mgr) {
1495 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1496 if (intel_crtc_needs_modeset(new_crtc_state))
1497 continue;
1498
1499 new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
1500 new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
1501 }
1502 }
1503
1504 if (!state->modeset)
1505 return;
1506
1507 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1508 i) {
1509 struct intel_connector *intel_connector;
1510 struct intel_encoder *encoder;
1511 struct intel_crtc *crtc;
1512
1513 if (!intel_connector_needs_modeset(state, connector))
1514 continue;
1515
1516 intel_connector = to_intel_connector(connector);
1517 encoder = intel_connector_primary_encoder(intel_connector);
1518 if (!encoder->update_prepare)
1519 continue;
1520
1521 crtc = new_conn_state->crtc ?
1522 to_intel_crtc(new_conn_state->crtc) : NULL;
1523 encoder->update_prepare(state, encoder, crtc);
1524 }
1525 }
1526
intel_encoders_update_complete(struct intel_atomic_state * state)1527 static void intel_encoders_update_complete(struct intel_atomic_state *state)
1528 {
1529 struct drm_connector_state *new_conn_state;
1530 struct drm_connector *connector;
1531 int i;
1532
1533 if (!state->modeset)
1534 return;
1535
1536 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1537 i) {
1538 struct intel_connector *intel_connector;
1539 struct intel_encoder *encoder;
1540 struct intel_crtc *crtc;
1541
1542 if (!intel_connector_needs_modeset(state, connector))
1543 continue;
1544
1545 intel_connector = to_intel_connector(connector);
1546 encoder = intel_connector_primary_encoder(intel_connector);
1547 if (!encoder->update_complete)
1548 continue;
1549
1550 crtc = new_conn_state->crtc ?
1551 to_intel_crtc(new_conn_state->crtc) : NULL;
1552 encoder->update_complete(state, encoder, crtc);
1553 }
1554 }
1555
intel_encoders_pre_pll_enable(struct intel_atomic_state * state,struct intel_crtc * crtc)1556 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1557 struct intel_crtc *crtc)
1558 {
1559 const struct intel_crtc_state *crtc_state =
1560 intel_atomic_get_new_crtc_state(state, crtc);
1561 const struct drm_connector_state *conn_state;
1562 struct drm_connector *conn;
1563 int i;
1564
1565 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1566 struct intel_encoder *encoder =
1567 to_intel_encoder(conn_state->best_encoder);
1568
1569 if (conn_state->crtc != &crtc->base)
1570 continue;
1571
1572 if (encoder->pre_pll_enable)
1573 encoder->pre_pll_enable(state, encoder,
1574 crtc_state, conn_state);
1575 }
1576 }
1577
1578 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1579 struct intel_crtc *crtc)
1580 {
1581 const struct intel_crtc_state *crtc_state =
1582 intel_atomic_get_new_crtc_state(state, crtc);
1583 const struct drm_connector_state *conn_state;
1584 struct drm_connector *conn;
1585 int i;
1586
1587 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1588 struct intel_encoder *encoder =
1589 to_intel_encoder(conn_state->best_encoder);
1590
1591 if (conn_state->crtc != &crtc->base)
1592 continue;
1593
1594 if (encoder->pre_enable)
1595 encoder->pre_enable(state, encoder,
1596 crtc_state, conn_state);
1597 }
1598 }
1599
1600 static void intel_encoders_enable(struct intel_atomic_state *state,
1601 struct intel_crtc *crtc)
1602 {
1603 const struct intel_crtc_state *crtc_state =
1604 intel_atomic_get_new_crtc_state(state, crtc);
1605 const struct drm_connector_state *conn_state;
1606 struct drm_connector *conn;
1607 int i;
1608
1609 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1610 struct intel_encoder *encoder =
1611 to_intel_encoder(conn_state->best_encoder);
1612
1613 if (conn_state->crtc != &crtc->base)
1614 continue;
1615
1616 if (encoder->enable)
1617 encoder->enable(state, encoder,
1618 crtc_state, conn_state);
1619 intel_opregion_notify_encoder(encoder, true);
1620 }
1621 }
1622
1623 static void intel_encoders_disable(struct intel_atomic_state *state,
1624 struct intel_crtc *crtc)
1625 {
1626 const struct intel_crtc_state *old_crtc_state =
1627 intel_atomic_get_old_crtc_state(state, crtc);
1628 const struct drm_connector_state *old_conn_state;
1629 struct drm_connector *conn;
1630 int i;
1631
1632 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1633 struct intel_encoder *encoder =
1634 to_intel_encoder(old_conn_state->best_encoder);
1635
1636 if (old_conn_state->crtc != &crtc->base)
1637 continue;
1638
1639 intel_opregion_notify_encoder(encoder, false);
1640 if (encoder->disable)
1641 encoder->disable(state, encoder,
1642 old_crtc_state, old_conn_state);
1643 }
1644 }
1645
1646 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1647 struct intel_crtc *crtc)
1648 {
1649 const struct intel_crtc_state *old_crtc_state =
1650 intel_atomic_get_old_crtc_state(state, crtc);
1651 const struct drm_connector_state *old_conn_state;
1652 struct drm_connector *conn;
1653 int i;
1654
1655 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1656 struct intel_encoder *encoder =
1657 to_intel_encoder(old_conn_state->best_encoder);
1658
1659 if (old_conn_state->crtc != &crtc->base)
1660 continue;
1661
1662 if (encoder->post_disable)
1663 encoder->post_disable(state, encoder,
1664 old_crtc_state, old_conn_state);
1665 }
1666 }
1667
1668 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1669 struct intel_crtc *crtc)
1670 {
1671 const struct intel_crtc_state *old_crtc_state =
1672 intel_atomic_get_old_crtc_state(state, crtc);
1673 const struct drm_connector_state *old_conn_state;
1674 struct drm_connector *conn;
1675 int i;
1676
1677 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1678 struct intel_encoder *encoder =
1679 to_intel_encoder(old_conn_state->best_encoder);
1680
1681 if (old_conn_state->crtc != &crtc->base)
1682 continue;
1683
1684 if (encoder->post_pll_disable)
1685 encoder->post_pll_disable(state, encoder,
1686 old_crtc_state, old_conn_state);
1687 }
1688 }
1689
1690 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1691 struct intel_crtc *crtc)
1692 {
1693 const struct intel_crtc_state *crtc_state =
1694 intel_atomic_get_new_crtc_state(state, crtc);
1695 const struct drm_connector_state *conn_state;
1696 struct drm_connector *conn;
1697 int i;
1698
1699 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1700 struct intel_encoder *encoder =
1701 to_intel_encoder(conn_state->best_encoder);
1702
1703 if (conn_state->crtc != &crtc->base)
1704 continue;
1705
1706 if (encoder->update_pipe)
1707 encoder->update_pipe(state, encoder,
1708 crtc_state, conn_state);
1709 }
1710 }
1711
1712 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1713 {
1714 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1715 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1716
1717 plane->disable_arm(plane, crtc_state);
1718 }
1719
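/*
 * Program the CPU transcoder for ILK-class hardware: M/N values (FDI
 * M/N with a PCH encoder, DP M/N otherwise), the transcoder timings
 * and PIPECONF.
 */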
1720 static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1721 {
1722 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1723 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1724
1725 if (crtc_state->has_pch_encoder) {
1726 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1727 &crtc_state->fdi_m_n);
1728 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1729 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1730 &crtc_state->dp_m_n);
1731 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1732 &crtc_state->dp_m2_n2);
1733 }
1734
1735 intel_set_transcoder_timings(crtc_state);
1736
1737 ilk_set_pipeconf(crtc_state);
1738 }
1739
1740 static void ilk_crtc_enable(struct intel_atomic_state *state,
1741 struct intel_crtc *crtc)
1742 {
1743 const struct intel_crtc_state *new_crtc_state =
1744 intel_atomic_get_new_crtc_state(state, crtc);
1745 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1746 enum pipe pipe = crtc->pipe;
1747
1748 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1749 return;
1750
1751 /*
1752 * Sometimes spurious CPU pipe underruns happen during FDI
1753 * training, at least with VGA+HDMI cloning. Suppress them.
1754 *
1755 * On ILK we get occasional spurious CPU pipe underruns
1756 * between eDP port A enable and vdd enable. Also PCH port
1757 * enable seems to result in the occasional CPU pipe underrun.
1758 *
1759 * Spurious PCH underruns also occur during PCH enabling.
1760 */
1761 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1762 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1763
1764 ilk_configure_cpu_transcoder(new_crtc_state);
1765
1766 intel_set_pipe_src_size(new_crtc_state);
1767
1768 crtc->active = true;
1769
1770 intel_encoders_pre_enable(state, crtc);
1771
1772 if (new_crtc_state->has_pch_encoder) {
1773 ilk_pch_pre_enable(state, crtc);
1774 } else {
1775 assert_fdi_tx_disabled(dev_priv, pipe);
1776 assert_fdi_rx_disabled(dev_priv, pipe);
1777 }
1778
1779 ilk_pfit_enable(new_crtc_state);
1780
1781 /*
1782 * On ILK+ LUT must be loaded before the pipe is running but with
1783 * clocks enabled
1784 */
1785 intel_color_load_luts(new_crtc_state);
1786 intel_color_commit_noarm(new_crtc_state);
1787 intel_color_commit_arm(new_crtc_state);
1788 /* update DSPCNTR to configure gamma for pipe bottom color */
1789 intel_disable_primary_plane(new_crtc_state);
1790
1791 intel_initial_watermarks(state, crtc);
1792 intel_enable_transcoder(new_crtc_state);
1793
1794 if (new_crtc_state->has_pch_encoder)
1795 ilk_pch_enable(state, crtc);
1796
1797 intel_crtc_vblank_on(new_crtc_state);
1798
1799 intel_encoders_enable(state, crtc);
1800
1801 if (HAS_PCH_CPT(dev_priv))
1802 cpt_verify_modeset(dev_priv, pipe);
1803
1804 /*
1805 * Must wait for vblank to avoid spurious PCH FIFO underruns.
1806 * And a second vblank wait is needed at least on ILK with
1807 * some interlaced HDMI modes. Let's do the double wait always
1808 * in case there are more corner cases we don't know about.
1809 */
1810 if (new_crtc_state->has_pch_encoder) {
1811 intel_crtc_wait_for_next_vblank(crtc);
1812 intel_crtc_wait_for_next_vblank(crtc);
1813 }
1814 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1815 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1816 }
1817
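/*
 * Display WA #1180 (WaDisableScalarClockGating): toggle pipe scaler
 * clock gating around pipe enable; applied on DISPLAY_VER 10 parts
 * when the pipe's panel fitter is in use (see hsw_crtc_enable()).
 */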
1818 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1819 enum pipe pipe, bool apply)
1820 {
1821 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1822 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1823
1824 if (apply)
1825 val |= mask;
1826 else
1827 val &= ~mask;
1828
1829 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
1830 }
1831
1832 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1833 {
1834 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1835 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1836
1837 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1838 HSW_LINETIME(crtc_state->linetime) |
1839 HSW_IPS_LINETIME(crtc_state->ips_linetime));
1840 }
1841
1842 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1843 {
1844 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1845 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1846 enum transcoder transcoder = crtc_state->cpu_transcoder;
1847 i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
1848 CHICKEN_TRANS(transcoder);
1849 u32 val;
1850
1851 val = intel_de_read(dev_priv, reg);
1852 val &= ~HSW_FRAME_START_DELAY_MASK;
1853 val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
1854 intel_de_write(dev_priv, reg, val);
1855 }
1856
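/*
 * With bigjoiner the encoder enable steps run on the master CRTC.
 * When called for the slave pipe, kick the master's encoders through
 * their pre-PLL/pre-enable hooks so both pipes come up in order.
 */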
1857 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
1858 const struct intel_crtc_state *crtc_state)
1859 {
1860 struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
1861
1862 /*
1863 * Enable sequence steps 1-7 on bigjoiner master
1864 */
1865 if (intel_crtc_is_bigjoiner_slave(crtc_state))
1866 intel_encoders_pre_pll_enable(state, master_crtc);
1867
1868 if (crtc_state->shared_dpll)
1869 intel_enable_shared_dpll(crtc_state);
1870
1871 if (intel_crtc_is_bigjoiner_slave(crtc_state))
1872 intel_encoders_pre_enable(state, master_crtc);
1873 }
1874
1875 static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1876 {
1877 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1878 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1879 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1880
1881 if (crtc_state->has_pch_encoder) {
1882 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1883 &crtc_state->fdi_m_n);
1884 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1885 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1886 &crtc_state->dp_m_n);
1887 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1888 &crtc_state->dp_m2_n2);
1889 }
1890
1891 intel_set_transcoder_timings(crtc_state);
1892
1893 if (cpu_transcoder != TRANSCODER_EDP)
1894 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
1895 crtc_state->pixel_multiplier - 1);
1896
1897 hsw_set_frame_start_delay(crtc_state);
1898
1899 hsw_set_transconf(crtc_state);
1900 }
1901
1902 static void hsw_crtc_enable(struct intel_atomic_state *state,
1903 struct intel_crtc *crtc)
1904 {
1905 const struct intel_crtc_state *new_crtc_state =
1906 intel_atomic_get_new_crtc_state(state, crtc);
1907 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1908 enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
1909 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1910 bool psl_clkgate_wa;
1911
1912 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1913 return;
1914
1915 if (!new_crtc_state->bigjoiner_pipes) {
1916 intel_encoders_pre_pll_enable(state, crtc);
1917
1918 if (new_crtc_state->shared_dpll)
1919 intel_enable_shared_dpll(new_crtc_state);
1920
1921 intel_encoders_pre_enable(state, crtc);
1922 } else {
1923 icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
1924 }
1925
1926 intel_dsc_enable(new_crtc_state);
1927
1928 if (DISPLAY_VER(dev_priv) >= 13)
1929 intel_uncompressed_joiner_enable(new_crtc_state);
1930
1931 intel_set_pipe_src_size(new_crtc_state);
1932 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
1933 bdw_set_pipemisc(new_crtc_state);
1934
1935 if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
1936 !transcoder_is_dsi(cpu_transcoder))
1937 hsw_configure_cpu_transcoder(new_crtc_state);
1938
1939 crtc->active = true;
1940
1941 /* Display WA #1180: WaDisableScalarClockGating: glk */
1942 psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
1943 new_crtc_state->pch_pfit.enabled;
1944 if (psl_clkgate_wa)
1945 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
1946
1947 if (DISPLAY_VER(dev_priv) >= 9)
1948 skl_pfit_enable(new_crtc_state);
1949 else
1950 ilk_pfit_enable(new_crtc_state);
1951
1952 /*
1953 * On ILK+ LUT must be loaded before the pipe is running but with
1954 * clocks enabled
1955 */
1956 intel_color_load_luts(new_crtc_state);
1957 intel_color_commit_noarm(new_crtc_state);
1958 intel_color_commit_arm(new_crtc_state);
1959 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
1960 if (DISPLAY_VER(dev_priv) < 9)
1961 intel_disable_primary_plane(new_crtc_state);
1962
1963 hsw_set_linetime_wm(new_crtc_state);
1964
1965 if (DISPLAY_VER(dev_priv) >= 11)
1966 icl_set_pipe_chicken(new_crtc_state);
1967
1968 intel_initial_watermarks(state, crtc);
1969
1970 if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
1971 intel_crtc_vblank_on(new_crtc_state);
1972
1973 intel_encoders_enable(state, crtc);
1974
1975 if (psl_clkgate_wa) {
1976 intel_crtc_wait_for_next_vblank(crtc);
1977 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
1978 }
1979
1980 /* If we change the relative order between pipe/planes enabling, we need
1981 * to change the workaround. */
1982 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
1983 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
1984 struct intel_crtc *wa_crtc;
1985
1986 wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
1987
1988 intel_crtc_wait_for_next_vblank(wa_crtc);
1989 intel_crtc_wait_for_next_vblank(wa_crtc);
1990 }
1991 }
1992
1993 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
1994 {
1995 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1996 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1997 enum pipe pipe = crtc->pipe;
1998
1999 /* To avoid upsetting the power well on Haswell, only disable the pfit if
2000 * it's in use. The hw state code will make sure we get this right. */
2001 if (!old_crtc_state->pch_pfit.enabled)
2002 return;
2003
2004 intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
2005 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
2006 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
2007 }
2008
2009 static void ilk_crtc_disable(struct intel_atomic_state *state,
2010 struct intel_crtc *crtc)
2011 {
2012 const struct intel_crtc_state *old_crtc_state =
2013 intel_atomic_get_old_crtc_state(state, crtc);
2014 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2015 enum pipe pipe = crtc->pipe;
2016
2017 /*
2018 * Sometimes spurious CPU pipe underruns happen when the
2019 * pipe is already disabled, but FDI RX/TX is still enabled.
2020 * Happens at least with VGA+HDMI cloning. Suppress them.
2021 */
2022 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2023 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2024
2025 intel_encoders_disable(state, crtc);
2026
2027 intel_crtc_vblank_off(old_crtc_state);
2028
2029 intel_disable_transcoder(old_crtc_state);
2030
2031 ilk_pfit_disable(old_crtc_state);
2032
2033 if (old_crtc_state->has_pch_encoder)
2034 ilk_pch_disable(state, crtc);
2035
2036 intel_encoders_post_disable(state, crtc);
2037
2038 if (old_crtc_state->has_pch_encoder)
2039 ilk_pch_post_disable(state, crtc);
2040
2041 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2042 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
2043 }
2044
2045 static void hsw_crtc_disable(struct intel_atomic_state *state,
2046 struct intel_crtc *crtc)
2047 {
2048 const struct intel_crtc_state *old_crtc_state =
2049 intel_atomic_get_old_crtc_state(state, crtc);
2050
2051 /*
2052 * FIXME collapse everything to one hook.
2053 * Need care with mst->ddi interactions.
2054 */
2055 if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
2056 intel_encoders_disable(state, crtc);
2057 intel_encoders_post_disable(state, crtc);
2058 }
2059 }
2060
2061 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
2062 {
2063 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2064 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2065
2066 if (!crtc_state->gmch_pfit.control)
2067 return;
2068
2069 /*
2070 * The panel fitter should only be adjusted whilst the pipe is disabled,
2071 * according to register description and PRM.
2072 */
2073 drm_WARN_ON(&dev_priv->drm,
2074 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
2075 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2076
2077 intel_de_write(dev_priv, PFIT_PGM_RATIOS,
2078 crtc_state->gmch_pfit.pgm_ratios);
2079 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
2080
2081 /* Border color in case we don't scale up to the full screen. Black by
2082 * default, change to something else for debugging. */
2083 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
2084 }
2085
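/*
 * PHY classification helpers: depending on the platform a given PHY
 * is either a "combo" PHY, a TypeC PHY, or (on DG2) a Synopsys SNPS
 * PHY. The per-platform PHY ranges below follow the bspec.
 */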
2086 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2087 {
2088 if (phy == PHY_NONE)
2089 return false;
2090 else if (IS_ALDERLAKE_S(dev_priv))
2091 return phy <= PHY_E;
2092 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2093 return phy <= PHY_D;
2094 else if (IS_JSL_EHL(dev_priv))
2095 return phy <= PHY_C;
2096 else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
2097 return phy <= PHY_B;
2098 else
2099 /*
2100 * DG2 outputs labelled as "combo PHY" in the bspec use
2101 * SNPS PHYs with completely different programming,
2102 * hence we always return false here.
2103 */
2104 return false;
2105 }
2106
2107 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2108 {
2109 if (IS_DG2(dev_priv))
2110 /* DG2's "TC1" output uses a SNPS PHY */
2111 return false;
2112 else if (IS_ALDERLAKE_P(dev_priv))
2113 return phy >= PHY_F && phy <= PHY_I;
2114 else if (IS_TIGERLAKE(dev_priv))
2115 return phy >= PHY_D && phy <= PHY_I;
2116 else if (IS_ICELAKE(dev_priv))
2117 return phy >= PHY_C && phy <= PHY_F;
2118 else
2119 return false;
2120 }
2121
2122 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2123 {
2124 if (phy == PHY_NONE)
2125 return false;
2126 else if (IS_DG2(dev_priv))
2127 /*
2128 * All four "combo" ports and the TC1 port (PHY E) use
2129 * Synopsys PHYs.
2130 */
2131 return phy <= PHY_E;
2132
2133 return false;
2134 }
2135
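/*
 * Map a port to its PHY, accounting for platform-specific quirks such
 * as the XELPD PORT_D_XELPD/PORT_TC1 numbering and JSL/EHL port D
 * mapping to PHY A.
 */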
2136 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2137 {
2138 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2139 return PHY_D + port - PORT_D_XELPD;
2140 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2141 return PHY_F + port - PORT_TC1;
2142 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2143 return PHY_B + port - PORT_TC1;
2144 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2145 return PHY_C + port - PORT_TC1;
2146 else if (IS_JSL_EHL(i915) && port == PORT_D)
2147 return PHY_A;
2148
2149 return PHY_A + port - PORT_A;
2150 }
2151
2152 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2153 {
2154 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2155 return TC_PORT_NONE;
2156
2157 if (DISPLAY_VER(dev_priv) >= 12)
2158 return TC_PORT_1 + port - PORT_TC1;
2159 else
2160 return TC_PORT_1 + port - PORT_C;
2161 }
2162
2163 enum intel_display_power_domain
2164 intel_aux_power_domain(struct intel_digital_port *dig_port)
2165 {
2166 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
2167
2168 if (intel_tc_port_in_tbt_alt_mode(dig_port))
2169 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
2170
2171 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
2172 }
2173
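/*
 * Collect every display power domain the given CRTC state depends on:
 * pipe, transcoder, panel fitter, the power domain of each encoder,
 * audio MMIO, the display core (for a shared DPLL) and DSC.
 */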
2174 static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
2175 struct intel_power_domain_mask *mask)
2176 {
2177 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2178 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2179 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2180 struct drm_encoder *encoder;
2181 enum pipe pipe = crtc->pipe;
2182
2183 bitmap_zero(mask->bits, POWER_DOMAIN_NUM);
2184
2185 if (!crtc_state->hw.active)
2186 return;
2187
2188 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
2189 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
2190 if (crtc_state->pch_pfit.enabled ||
2191 crtc_state->pch_pfit.force_thru)
2192 set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
2193
2194 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
2195 crtc_state->uapi.encoder_mask) {
2196 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2197
2198 set_bit(intel_encoder->power_domain, mask->bits);
2199 }
2200
2201 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
2202 set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
2203
2204 if (crtc_state->shared_dpll)
2205 set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
2206
2207 if (crtc_state->dsc.compression_enable)
2208 set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
2209 }
2210
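/*
 * Grab references on the power domains the new CRTC state needs but
 * does not hold yet, and report the domains that are no longer needed
 * via @old_domains so the caller can drop them with
 * intel_modeset_put_crtc_power_domains() after the commit.
 */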
2211 void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
2212 struct intel_power_domain_mask *old_domains)
2213 {
2214 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2215 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2216 enum intel_display_power_domain domain;
2217 struct intel_power_domain_mask domains, new_domains;
2218
2219 get_crtc_power_domains(crtc_state, &domains);
2220
2221 bitmap_andnot(new_domains.bits,
2222 domains.bits,
2223 crtc->enabled_power_domains.mask.bits,
2224 POWER_DOMAIN_NUM);
2225 bitmap_andnot(old_domains->bits,
2226 crtc->enabled_power_domains.mask.bits,
2227 domains.bits,
2228 POWER_DOMAIN_NUM);
2229
2230 for_each_power_domain(domain, &new_domains)
2231 intel_display_power_get_in_set(dev_priv,
2232 &crtc->enabled_power_domains,
2233 domain);
2234 }
2235
2236 void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2237 struct intel_power_domain_mask *domains)
2238 {
2239 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2240 &crtc->enabled_power_domains,
2241 domains);
2242 }
2243
2244 static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
2245 {
2246 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2247 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2248
2249 if (intel_crtc_has_dp_encoder(crtc_state)) {
2250 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
2251 &crtc_state->dp_m_n);
2252 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
2253 &crtc_state->dp_m2_n2);
2254 }
2255
2256 intel_set_transcoder_timings(crtc_state);
2257
2258 i9xx_set_pipeconf(crtc_state);
2259 }
2260
2261 static void valleyview_crtc_enable(struct intel_atomic_state *state,
2262 struct intel_crtc *crtc)
2263 {
2264 const struct intel_crtc_state *new_crtc_state =
2265 intel_atomic_get_new_crtc_state(state, crtc);
2266 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2267 enum pipe pipe = crtc->pipe;
2268
2269 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2270 return;
2271
2272 i9xx_configure_cpu_transcoder(new_crtc_state);
2273
2274 intel_set_pipe_src_size(new_crtc_state);
2275
2276 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
2277 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
2278 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
2279 }
2280
2281 crtc->active = true;
2282
2283 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2284
2285 intel_encoders_pre_pll_enable(state, crtc);
2286
2287 if (IS_CHERRYVIEW(dev_priv))
2288 chv_enable_pll(new_crtc_state);
2289 else
2290 vlv_enable_pll(new_crtc_state);
2291
2292 intel_encoders_pre_enable(state, crtc);
2293
2294 i9xx_pfit_enable(new_crtc_state);
2295
2296 intel_color_load_luts(new_crtc_state);
2297 intel_color_commit_noarm(new_crtc_state);
2298 intel_color_commit_arm(new_crtc_state);
2299 /* update DSPCNTR to configure gamma for pipe bottom color */
2300 intel_disable_primary_plane(new_crtc_state);
2301
2302 intel_initial_watermarks(state, crtc);
2303 intel_enable_transcoder(new_crtc_state);
2304
2305 intel_crtc_vblank_on(new_crtc_state);
2306
2307 intel_encoders_enable(state, crtc);
2308 }
2309
2310 static void i9xx_crtc_enable(struct intel_atomic_state *state,
2311 struct intel_crtc *crtc)
2312 {
2313 const struct intel_crtc_state *new_crtc_state =
2314 intel_atomic_get_new_crtc_state(state, crtc);
2315 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2316 enum pipe pipe = crtc->pipe;
2317
2318 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2319 return;
2320
2321 i9xx_configure_cpu_transcoder(new_crtc_state);
2322
2323 intel_set_pipe_src_size(new_crtc_state);
2324
2325 crtc->active = true;
2326
2327 if (DISPLAY_VER(dev_priv) != 2)
2328 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2329
2330 intel_encoders_pre_enable(state, crtc);
2331
2332 i9xx_enable_pll(new_crtc_state);
2333
2334 i9xx_pfit_enable(new_crtc_state);
2335
2336 intel_color_load_luts(new_crtc_state);
2337 intel_color_commit_noarm(new_crtc_state);
2338 intel_color_commit_arm(new_crtc_state);
2339 /* update DSPCNTR to configure gamma for pipe bottom color */
2340 intel_disable_primary_plane(new_crtc_state);
2341
2342 if (!intel_initial_watermarks(state, crtc))
2343 intel_update_watermarks(dev_priv);
2344 intel_enable_transcoder(new_crtc_state);
2345
2346 intel_crtc_vblank_on(new_crtc_state);
2347
2348 intel_encoders_enable(state, crtc);
2349
2350 /* prevents spurious underruns */
2351 if (DISPLAY_VER(dev_priv) == 2)
2352 intel_crtc_wait_for_next_vblank(crtc);
2353 }
2354
2355 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2356 {
2357 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2358 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2359
2360 if (!old_crtc_state->gmch_pfit.control)
2361 return;
2362
2363 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2364
2365 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2366 intel_de_read(dev_priv, PFIT_CONTROL));
2367 intel_de_write(dev_priv, PFIT_CONTROL, 0);
2368 }
2369
2370 static void i9xx_crtc_disable(struct intel_atomic_state *state,
2371 struct intel_crtc *crtc)
2372 {
2373 struct intel_crtc_state *old_crtc_state =
2374 intel_atomic_get_old_crtc_state(state, crtc);
2375 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2376 enum pipe pipe = crtc->pipe;
2377
2378 /*
2379 * On gen2 planes are double buffered but the pipe isn't, so we must
2380 * wait for planes to fully turn off before disabling the pipe.
2381 */
2382 if (DISPLAY_VER(dev_priv) == 2)
2383 intel_crtc_wait_for_next_vblank(crtc);
2384
2385 intel_encoders_disable(state, crtc);
2386
2387 intel_crtc_vblank_off(old_crtc_state);
2388
2389 intel_disable_transcoder(old_crtc_state);
2390
2391 i9xx_pfit_disable(old_crtc_state);
2392
2393 intel_encoders_post_disable(state, crtc);
2394
2395 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2396 if (IS_CHERRYVIEW(dev_priv))
2397 chv_disable_pll(dev_priv, pipe);
2398 else if (IS_VALLEYVIEW(dev_priv))
2399 vlv_disable_pll(dev_priv, pipe);
2400 else
2401 i9xx_disable_pll(old_crtc_state);
2402 }
2403
2404 intel_encoders_post_pll_disable(state, crtc);
2405
2406 if (DISPLAY_VER(dev_priv) != 2)
2407 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2408
2409 if (!dev_priv->display.funcs.wm->initial_watermarks)
2410 intel_update_watermarks(dev_priv);
2411
2412 /* clock the pipe down to 640x480@60 to potentially save power */
2413 if (IS_I830(dev_priv))
2414 i830_enable_pipe(dev_priv, pipe);
2415 }
2416
2417
2418 /*
2419 * Turn all CRTCs off, but do not adjust state
2420 * This has to be paired with a call to intel_modeset_setup_hw_state.
2421 */
2422 int intel_display_suspend(struct drm_device *dev)
2423 {
2424 struct drm_i915_private *dev_priv = to_i915(dev);
2425 struct drm_atomic_state *state;
2426 int ret;
2427
2428 if (!HAS_DISPLAY(dev_priv))
2429 return 0;
2430
2431 state = drm_atomic_helper_suspend(dev);
2432 ret = PTR_ERR_OR_ZERO(state);
2433 if (ret)
2434 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2435 ret);
2436 else
2437 dev_priv->modeset_restore_state = state;
2438 return ret;
2439 }
2440
2441 void intel_encoder_destroy(struct drm_encoder *encoder)
2442 {
2443 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2444
2445 drm_encoder_cleanup(encoder);
2446 kfree(intel_encoder);
2447 }
2448
2449 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2450 {
2451 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2452
2453 /* GDG double wide on either pipe, otherwise pipe A only */
2454 return DISPLAY_VER(dev_priv) < 4 &&
2455 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2456 }
2457
2458 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2459 {
2460 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2461 struct drm_rect src;
2462
2463 /*
2464 * We only use IF-ID interlacing. If we ever use
2465 * PF-ID we'll need to adjust the pixel_rate here.
2466 */
2467
2468 if (!crtc_state->pch_pfit.enabled)
2469 return pixel_rate;
2470
2471 drm_rect_init(&src, 0, 0,
2472 drm_rect_width(&crtc_state->pipe_src) << 16,
2473 drm_rect_height(&crtc_state->pipe_src) << 16);
2474
2475 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2476 pixel_rate);
2477 }
2478
2479 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2480 const struct drm_display_mode *timings)
2481 {
2482 mode->hdisplay = timings->crtc_hdisplay;
2483 mode->htotal = timings->crtc_htotal;
2484 mode->hsync_start = timings->crtc_hsync_start;
2485 mode->hsync_end = timings->crtc_hsync_end;
2486
2487 mode->vdisplay = timings->crtc_vdisplay;
2488 mode->vtotal = timings->crtc_vtotal;
2489 mode->vsync_start = timings->crtc_vsync_start;
2490 mode->vsync_end = timings->crtc_vsync_end;
2491
2492 mode->flags = timings->flags;
2493 mode->type = DRM_MODE_TYPE_DRIVER;
2494
2495 mode->clock = timings->crtc_clock;
2496
2497 drm_mode_set_name(mode);
2498 }
2499
2500 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2501 {
2502 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2503
2504 if (HAS_GMCH(dev_priv))
2505 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2506 crtc_state->pixel_rate =
2507 crtc_state->hw.pipe_mode.crtc_clock;
2508 else
2509 crtc_state->pixel_rate =
2510 ilk_pipe_pixel_rate(crtc_state);
2511 }
2512
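/*
 * With bigjoiner each pipe drives only 1/num_pipes of the total
 * width, so the dotclock and all horizontal timings are divided
 * between the pipes.
 */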
2513 static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
2514 struct drm_display_mode *mode)
2515 {
2516 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2517
2518 if (num_pipes < 2)
2519 return;
2520
2521 mode->crtc_clock /= num_pipes;
2522 mode->crtc_hdisplay /= num_pipes;
2523 mode->crtc_hblank_start /= num_pipes;
2524 mode->crtc_hblank_end /= num_pipes;
2525 mode->crtc_hsync_start /= num_pipes;
2526 mode->crtc_hsync_end /= num_pipes;
2527 mode->crtc_htotal /= num_pipes;
2528 }
2529
2530 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
2531 struct drm_display_mode *mode)
2532 {
2533 int overlap = crtc_state->splitter.pixel_overlap;
2534 int n = crtc_state->splitter.link_count;
2535
2536 if (!crtc_state->splitter.enable)
2537 return;
2538
2539 /*
2540 * eDP MSO uses segment timings from EDID for transcoder
2541 * timings, but full mode for everything else.
2542 *
2543 * h_full = (h_segment - pixel_overlap) * link_count
2544 */
2545 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
2546 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
2547 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
2548 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
2549 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
2550 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
2551 mode->crtc_clock *= n;
2552 }
2553
2554 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
2555 {
2556 struct drm_display_mode *mode = &crtc_state->hw.mode;
2557 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2558 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2559
2560 /*
2561 * Start with the adjusted_mode crtc timings, which
2562 * have been filled with the transcoder timings.
2563 */
2564 drm_mode_copy(pipe_mode, adjusted_mode);
2565
2566 /* Expand MSO per-segment transcoder timings to full */
2567 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2568
2569 /*
2570 * We want the full numbers in adjusted_mode normal timings,
2571 * adjusted_mode crtc timings are left with the raw transcoder
2572 * timings.
2573 */
2574 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
2575
2576 /* Populate the "user" mode with full numbers */
2577 drm_mode_copy(mode, pipe_mode);
2578 intel_mode_from_crtc_timings(mode, mode);
2579 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
2580 (intel_bigjoiner_num_pipes(crtc_state) ?: 1);
2581 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);
2582
2583 /* Derive per-pipe timings in case bigjoiner is used */
2584 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2585 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2586
2587 intel_crtc_compute_pixel_rate(crtc_state);
2588 }
2589
2590 void intel_encoder_get_config(struct intel_encoder *encoder,
2591 struct intel_crtc_state *crtc_state)
2592 {
2593 encoder->get_config(encoder, crtc_state);
2594
2595 intel_crtc_readout_derived_state(crtc_state);
2596 }
2597
2598 static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
2599 {
2600 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2601 int width, height;
2602
2603 if (num_pipes < 2)
2604 return;
2605
2606 width = drm_rect_width(&crtc_state->pipe_src);
2607 height = drm_rect_height(&crtc_state->pipe_src);
2608
2609 drm_rect_init(&crtc_state->pipe_src, 0, 0,
2610 width / num_pipes, height);
2611 }
2612
2613 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
2614 {
2615 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2616 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2617
2618 intel_bigjoiner_compute_pipe_src(crtc_state);
2619
2620 /*
2621 * Pipe horizontal size must be even in:
2622 * - DVO ganged mode
2623 * - LVDS dual channel mode
2624 * - Double wide pipe
2625 */
2626 if (drm_rect_width(&crtc_state->pipe_src) & 1) {
2627 if (crtc_state->double_wide) {
2628 drm_dbg_kms(&i915->drm,
2629 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
2630 crtc->base.base.id, crtc->base.name);
2631 return -EINVAL;
2632 }
2633
2634 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
2635 intel_is_dual_link_lvds(i915)) {
2636 drm_dbg_kms(&i915->drm,
2637 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
2638 crtc->base.base.id, crtc->base.name);
2639 return -EINVAL;
2640 }
2641 }
2642
2643 return 0;
2644 }
2645
2646 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
2647 {
2648 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2649 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2650 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2651 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2652 int clock_limit = i915->max_dotclk_freq;
2653
2654 /*
2655 * Start with the adjusted_mode crtc timings, which
2656 * have been filled with the transcoder timings.
2657 */
2658 drm_mode_copy(pipe_mode, adjusted_mode);
2659
2660 /* Expand MSO per-segment transcoder timings to full */
2661 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2662
2663 /* Derive per-pipe timings in case bigjoiner is used */
2664 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2665 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2666
2667 if (DISPLAY_VER(i915) < 4) {
2668 clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
2669
2670 /*
2671 * Enable double wide mode when the dot clock
2672 * is > 90% of the (display) core speed.
2673 */
2674 if (intel_crtc_supports_double_wide(crtc) &&
2675 pipe_mode->crtc_clock > clock_limit) {
2676 clock_limit = i915->max_dotclk_freq;
2677 crtc_state->double_wide = true;
2678 }
2679 }
2680
2681 if (pipe_mode->crtc_clock > clock_limit) {
2682 drm_dbg_kms(&i915->drm,
2683 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
2684 crtc->base.base.id, crtc->base.name,
2685 pipe_mode->crtc_clock, clock_limit,
2686 str_yes_no(crtc_state->double_wide));
2687 return -EINVAL;
2688 }
2689
2690 return 0;
2691 }
2692
2693 static int intel_crtc_compute_config(struct intel_atomic_state *state,
2694 struct intel_crtc *crtc)
2695 {
2696 struct intel_crtc_state *crtc_state =
2697 intel_atomic_get_new_crtc_state(state, crtc);
2698 int ret;
2699
2700 ret = intel_dpll_crtc_compute_clock(state, crtc);
2701 if (ret)
2702 return ret;
2703
2704 ret = intel_crtc_compute_pipe_src(crtc_state);
2705 if (ret)
2706 return ret;
2707
2708 ret = intel_crtc_compute_pipe_mode(crtc_state);
2709 if (ret)
2710 return ret;
2711
2712 intel_crtc_compute_pixel_rate(crtc_state);
2713
2714 if (crtc_state->has_pch_encoder)
2715 return ilk_fdi_compute_config(crtc, crtc_state);
2716
2717 return 0;
2718 }
2719
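/*
 * Link M/N helpers. With FEC disabled the computed values satisfy:
 *
 *   data M/N = (bits_per_pixel * pixel_clock) / (link_clock * nlanes * 8)
 *   link M/N = pixel_clock / link_clock
 *
 * N is pinned to a fixed constant and M/N are then shifted down
 * together until both fit in the DATA_LINK_M_N_MASK register fields.
 */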
2720 static void
2721 intel_reduce_m_n_ratio(u32 *num, u32 *den)
2722 {
2723 while (*num > DATA_LINK_M_N_MASK ||
2724 *den > DATA_LINK_M_N_MASK) {
2725 *num >>= 1;
2726 *den >>= 1;
2727 }
2728 }
2729
2730 static void compute_m_n(u32 *ret_m, u32 *ret_n,
2731 u32 m, u32 n, u32 constant_n)
2732 {
2733 if (constant_n)
2734 *ret_n = constant_n;
2735 else
2736 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
2737
2738 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
2739 intel_reduce_m_n_ratio(ret_m, ret_n);
2740 }
2741
2742 void
2743 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
2744 int pixel_clock, int link_clock,
2745 struct intel_link_m_n *m_n,
2746 bool fec_enable)
2747 {
2748 u32 data_clock = bits_per_pixel * pixel_clock;
2749
2750 if (fec_enable)
2751 data_clock = intel_dp_mode_to_fec_clock(data_clock);
2752
2753 /*
2754 * Windows/BIOS uses fixed M/N values always. Follow suit.
2755 *
2756 * Also several DP dongles in particular seem to be fussy
2757 * about too large link M/N values. Presumably the 20bit
2758 * value used by Windows/BIOS is acceptable to everyone.
2759 */
2760 m_n->tu = 64;
2761 compute_m_n(&m_n->data_m, &m_n->data_n,
2762 data_clock, link_clock * nlanes * 8,
2763 0x8000000);
2764
2765 compute_m_n(&m_n->link_m, &m_n->link_n,
2766 pixel_clock, link_clock,
2767 0x80000);
2768 }
2769
2770 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
2771 {
2772 /*
2773 * There may be no VBT; and if the BIOS enabled SSC we can
2774 * just keep using it to avoid unnecessary flicker. Whereas if the
2775 * BIOS isn't using it, don't assume it will work even if the VBT
2776 * indicates as much.
2777 */
2778 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
2779 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
2780 PCH_DREF_CONTROL) &
2781 DREF_SSC1_ENABLE;
2782
2783 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
2784 drm_dbg_kms(&dev_priv->drm,
2785 "SSC %s by BIOS, overriding VBT which says %s\n",
2786 str_enabled_disabled(bios_lvds_use_ssc),
2787 str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
2788 dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
2789 }
2790 }
2791 }
2792
2793 void intel_zero_m_n(struct intel_link_m_n *m_n)
2794 {
2795 /* corresponds to 0 register value */
2796 memset(m_n, 0, sizeof(*m_n));
2797 m_n->tu = 1;
2798 }
2799
2800 void intel_set_m_n(struct drm_i915_private *i915,
2801 const struct intel_link_m_n *m_n,
2802 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
2803 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
2804 {
2805 intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
2806 intel_de_write(i915, data_n_reg, m_n->data_n);
2807 intel_de_write(i915, link_m_reg, m_n->link_m);
2808 /*
2809 * On BDW+ writing LINK_N arms the double buffered update
2810 * of all the M/N registers, so it must be written last.
2811 */
2812 intel_de_write(i915, link_n_reg, m_n->link_n);
2813 }
2814
2815 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
2816 enum transcoder transcoder)
2817 {
2818 if (IS_HASWELL(dev_priv))
2819 return transcoder == TRANSCODER_EDP;
2820
2821 return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
2822 }
2823
2824 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
2825 enum transcoder transcoder,
2826 const struct intel_link_m_n *m_n)
2827 {
2828 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2829 enum pipe pipe = crtc->pipe;
2830
2831 if (DISPLAY_VER(dev_priv) >= 5)
2832 intel_set_m_n(dev_priv, m_n,
2833 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
2834 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
2835 else
2836 intel_set_m_n(dev_priv, m_n,
2837 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
2838 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
2839 }
2840
2841 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
2842 enum transcoder transcoder,
2843 const struct intel_link_m_n *m_n)
2844 {
2845 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2846
2847 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
2848 return;
2849
2850 intel_set_m_n(dev_priv, m_n,
2851 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
2852 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
2853 }
2854
2855 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
2856 {
2857 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2858 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2859 enum pipe pipe = crtc->pipe;
2860 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2861 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2862 u32 crtc_vtotal, crtc_vblank_end;
2863 int vsyncshift = 0;
2864
2865 /* We need to be careful not to change the adjusted mode, for otherwise
2866 * the hw state checker will get angry at the mismatch. */
2867 crtc_vtotal = adjusted_mode->crtc_vtotal;
2868 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
2869
2870 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
2871 /* the chip adds 2 halflines automatically */
2872 crtc_vtotal -= 1;
2873 crtc_vblank_end -= 1;
2874
2875 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
2876 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
2877 else
2878 vsyncshift = adjusted_mode->crtc_hsync_start -
2879 adjusted_mode->crtc_htotal / 2;
2880 if (vsyncshift < 0)
2881 vsyncshift += adjusted_mode->crtc_htotal;
2882 }
2883
2884 if (DISPLAY_VER(dev_priv) > 3)
2885 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
2886 vsyncshift);
2887
2888 intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
2889 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
2890 intel_de_write(dev_priv, HBLANK(cpu_transcoder),
2891 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
2892 intel_de_write(dev_priv, HSYNC(cpu_transcoder),
2893 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
2894
2895 intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
2896 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
2897 intel_de_write(dev_priv, VBLANK(cpu_transcoder),
2898 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
2899 intel_de_write(dev_priv, VSYNC(cpu_transcoder),
2900 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
2901
2902 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
2903 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
2904 * documented on the DDI_FUNC_CTL register description, EDP Input Select
2905 * bits. */
2906 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
2907 (pipe == PIPE_B || pipe == PIPE_C))
2908 intel_de_write(dev_priv, VTOTAL(pipe),
2909 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2910
2911 }
2912
2913 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
2914 {
2915 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2916 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2917 int width = drm_rect_width(&crtc_state->pipe_src);
2918 int height = drm_rect_height(&crtc_state->pipe_src);
2919 enum pipe pipe = crtc->pipe;
2920
2921 /* pipesrc controls the size that is scaled from, which should
2922 * always be the user's requested size.
2923 */
2924 intel_de_write(dev_priv, PIPESRC(pipe),
2925 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
2926 }
2927
2928 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
2929 {
2930 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2931 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2932
2933 if (DISPLAY_VER(dev_priv) == 2)
2934 return false;
2935
2936 if (DISPLAY_VER(dev_priv) >= 9 ||
2937 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2938 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
2939 else
2940 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
2941 }
2942
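/*
 * Read the transcoder timings back from the hardware. The registers
 * store each value minus one, packed into the low/high 16 bits, hence
 * the +1 adjustments below.
 */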
2943 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
2944 struct intel_crtc_state *pipe_config)
2945 {
2946 struct drm_device *dev = crtc->base.dev;
2947 struct drm_i915_private *dev_priv = to_i915(dev);
2948 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
2949 u32 tmp;
2950
2951 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
2952 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
2953 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
2954
2955 if (!transcoder_is_dsi(cpu_transcoder)) {
2956 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
2957 pipe_config->hw.adjusted_mode.crtc_hblank_start =
2958 (tmp & 0xffff) + 1;
2959 pipe_config->hw.adjusted_mode.crtc_hblank_end =
2960 ((tmp >> 16) & 0xffff) + 1;
2961 }
2962 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
2963 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
2964 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
2965
2966 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
2967 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
2968 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
2969
2970 if (!transcoder_is_dsi(cpu_transcoder)) {
2971 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
2972 pipe_config->hw.adjusted_mode.crtc_vblank_start =
2973 (tmp & 0xffff) + 1;
2974 pipe_config->hw.adjusted_mode.crtc_vblank_end =
2975 ((tmp >> 16) & 0xffff) + 1;
2976 }
2977 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
2978 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
2979 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
2980
2981 if (intel_pipe_is_interlaced(pipe_config)) {
2982 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
2983 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
2984 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
2985 }
2986 }
2987
2988 static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
2989 {
2990 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2991 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2992 enum pipe master_pipe, pipe = crtc->pipe;
2993 int width;
2994
2995 if (num_pipes < 2)
2996 return;
2997
2998 master_pipe = bigjoiner_master_pipe(crtc_state);
2999 width = drm_rect_width(&crtc_state->pipe_src);
3000
3001 drm_rect_translate_to(&crtc_state->pipe_src,
3002 (pipe - master_pipe) * width, 0);
3003 }
3004
3005 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3006 struct intel_crtc_state *pipe_config)
3007 {
3008 struct drm_device *dev = crtc->base.dev;
3009 struct drm_i915_private *dev_priv = to_i915(dev);
3010 u32 tmp;
3011
3012 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3013
3014 drm_rect_init(&pipe_config->pipe_src, 0, 0,
3015 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
3016 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
3017
3018 intel_bigjoiner_adjust_pipe_src(pipe_config);
3019 }
3020
3021 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
3022 {
3023 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3024 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3025 u32 pipeconf = 0;
3026
3027 /*
3028 * - We keep both pipes enabled on 830
3029 * - During modeset the pipe is still disabled and must remain so
3030 * - During fastset the pipe is already enabled and must remain so
3031 */
3032 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
3033 pipeconf |= PIPECONF_ENABLE;
3034
3035 if (crtc_state->double_wide)
3036 pipeconf |= PIPECONF_DOUBLE_WIDE;
3037
3038 /* only g4x and later have fancy bpc/dither controls */
3039 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3040 IS_CHERRYVIEW(dev_priv)) {
3041 /* Bspec claims that we can't use dithering for 30bpp pipes. */
3042 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
3043 pipeconf |= PIPECONF_DITHER_EN |
3044 PIPECONF_DITHER_TYPE_SP;
3045
3046 switch (crtc_state->pipe_bpp) {
3047 default:
3048 /* Case prevented by intel_choose_pipe_bpp_dither. */
3049 MISSING_CASE(crtc_state->pipe_bpp);
3050 fallthrough;
3051 case 18:
3052 pipeconf |= PIPECONF_BPC_6;
3053 break;
3054 case 24:
3055 pipeconf |= PIPECONF_BPC_8;
3056 break;
3057 case 30:
3058 pipeconf |= PIPECONF_BPC_10;
3059 break;
3060 }
3061 }
3062
3063 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
3064 if (DISPLAY_VER(dev_priv) < 4 ||
3065 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3066 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3067 else
3068 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
3069 } else {
3070 pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
3071 }
3072
3073 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3074 crtc_state->limited_color_range)
3075 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
3076
3077 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
3078
3079 pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
3080
3081 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
3082 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
3083 }
3084
3085 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3086 {
3087 if (IS_I830(dev_priv))
3088 return false;
3089
3090 return DISPLAY_VER(dev_priv) >= 4 ||
3091 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
3092 }
3093
3094 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
3095 {
3096 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3097 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3098 u32 tmp;
3099
3100 if (!i9xx_has_pfit(dev_priv))
3101 return;
3102
3103 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
3104 if (!(tmp & PFIT_ENABLE))
3105 return;
3106
3107 /* Check whether the pfit is attached to our pipe. */
3108 if (DISPLAY_VER(dev_priv) < 4) {
3109 if (crtc->pipe != PIPE_B)
3110 return;
3111 } else {
3112 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
3113 return;
3114 }
3115
3116 crtc_state->gmch_pfit.control = tmp;
3117 crtc_state->gmch_pfit.pgm_ratios =
3118 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
3119 }
3120
vlv_crtc_clock_get(struct intel_crtc * crtc,struct intel_crtc_state * pipe_config)3121 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
3122 struct intel_crtc_state *pipe_config)
3123 {
3124 struct drm_device *dev = crtc->base.dev;
3125 struct drm_i915_private *dev_priv = to_i915(dev);
3126 enum pipe pipe = crtc->pipe;
3127 struct dpll clock;
3128 u32 mdiv;
3129 int refclk = 100000;
3130
3131 /* In case of DSI, DPLL will not be used */
3132 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3133 return;
3134
3135 vlv_dpio_get(dev_priv);
3136 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
3137 vlv_dpio_put(dev_priv);
3138
3139 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
3140 clock.m2 = mdiv & DPIO_M2DIV_MASK;
3141 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
3142 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
3143 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
3144
3145 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
3146 }
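/*
 * For reference, vlv_calc_dpll_params() derives the port clock as
 * refclk * (m1 * m2) / n / (p1 * p2), with refclk fixed at 100 MHz
 * above. Illustrative example (hypothetical divider values): m1=2,
 * m2=27, n=1, p1=2, p2=10 would give a 5.4 GHz VCO and a 270 MHz
 * port clock, i.e. the DP HBR link rate.
 */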

static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
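/*
 * Note on the m2 decode above: CHV uses a fractional feedback divider,
 * so m2 is assembled as an 8.22 fixed-point value - the integer part
 * from PLL_DW0 shifted up by 22 bits, plus the 22-bit fraction from
 * PLL_DW2 when DPIO_CHV_FRAC_DIV_EN is set. chv_calc_dpll_params() is
 * expected to scale the extra 2^22 factor back out when computing the
 * VCO, so the result is comparable to the VLV path.
 */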

static enum intel_output_format
bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	if (tmp & PIPEMISC_YUV420_ENABLE) {
		/* We support 4:2:0 in full blend mode only */
		drm_WARN_ON(&dev_priv->drm,
			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);

		return INTEL_OUTPUT_FORMAT_YCBCR420;
	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	} else {
		return INTEL_OUTPUT_FORMAT_RGB;
	}
}

static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 tmp;

	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	if (tmp & DISP_PIPE_GAMMA_ENABLE)
		crtc_state->gamma_enable = true;

	if (!HAS_GMCH(dev_priv) &&
	    tmp & DISP_PIPE_CSC_ENABLE)
		crtc_state->csc_enable = true;
}

static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_BPC_6:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_BPC_8:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_BPC_10:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			MISSING_CASE(tmp);
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);

	pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= PIPECONF_ENABLE;

	switch (crtc_state->pipe_bpp) {
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		MISSING_CASE(crtc_state->pipe_bpp);
		fallthrough;
	case 18:
		val |= PIPECONF_BPC_6;
		break;
	case 24:
		val |= PIPECONF_BPC_8;
		break;
	case 30:
		val |= PIPECONF_BPC_10;
		break;
	case 36:
		val |= PIPECONF_BPC_12;
		break;
	}

	if (crtc_state->dither)
		val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACE_IF_ID_ILK;
	else
		val |= PIPECONF_INTERLACE_PF_PD_ILK;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
	val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}

static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= PIPECONF_ENABLE;

	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACE_IF_ID_ILK;
	else
		val |= PIPECONF_INTERLACE_PF_PD_ILK;

	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}

static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_BPC_6;
		break;
	case 24:
		val |= PIPEMISC_BPC_8;
		break;
	case 30:
		val |= PIPEMISC_BPC_10;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) > 12)
			val |= PIPEMISC_BPC_12_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}

int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_BPC_MASK) {
	case PIPEMISC_BPC_6:
		return 18;
	case PIPEMISC_BPC_8:
		return 24;
	case PIPEMISC_BPC_10:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPEMISC_BPC_12_ADLP:
		if (DISPLAY_VER(dev_priv) > 12)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}

int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}
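/*
 * Hypothetical worked example of the above: a 148500 kHz pixel clock
 * at 24 bpp on a 270000 kHz (HBR) link gives
 * bps = 148500 * 24 * 21 / 20 = 3742200, and
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
 */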

void intel_get_m_n(struct drm_i915_private *i915,
		   struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
	m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
	m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
	m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
}

void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5)
		intel_get_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
	else
		intel_get_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}

void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_get_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}

static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
				  u32 pos, u32 size)
{
	drm_rect_init(&crtc_state->pch_pfit.dst,
		      pos >> 16, pos & 0xffff,
		      size >> 16, size & 0xffff);
}
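/*
 * The pfit window registers pack two 16-bit fields per dword: x/width
 * in the high half, y/height in the low half. E.g. (hypothetical
 * values) pos = 0x00100020 and size = 0x07800438 decode to a
 * 1920x1080 destination window at (16, 32).
 */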

static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}

static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}

static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_BPC_6:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_BPC_8:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_BPC_10:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_BPC_12:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);

	pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;

	pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	pipe_config->pixel_multiplier = 1;

	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

static u8 bigjoiner_pipes(struct drm_i915_private *i915)
{
	u8 pipes;

	if (DISPLAY_VER(i915) >= 12)
		pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
	else if (DISPLAY_VER(i915) >= 11)
		pipes = BIT(PIPE_B) | BIT(PIPE_C);
	else
		pipes = 0;

	return pipes & RUNTIME_INFO(i915)->pipe_mask;
}

static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}

static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
				    u8 *master_pipes, u8 *slave_pipes)
{
	struct intel_crtc *crtc;

	*master_pipes = 0;
	*slave_pipes = 0;

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
					 bigjoiner_pipes(dev_priv)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & MASTER_BIG_JOINER_ENABLE)
				*master_pipes |= BIT(pipe);
			else
				*slave_pipes |= BIT(pipe);
		}

		if (DISPLAY_VER(dev_priv) < 13)
			continue;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_MASTER)
				*master_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
				*slave_pipes |= BIT(pipe);
		}
	}

	/* Bigjoiner pipes should always be consecutive master and slave */
	drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
		 *master_pipes, *slave_pipes);
}

static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	if ((slave_pipes & BIT(pipe)) == 0)
		return pipe;

	/* ignore everything above our pipe */
	master_pipes &= ~GENMASK(7, pipe);

	/* highest remaining bit should be our master pipe */
	return fls(master_pipes) - 1;
}
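/*
 * Worked example with hypothetical masks: given master_pipes = 0b0101
 * (A and C) and slave_pipes = 0b1010 (B and D), looking up pipe D
 * (bit 3) first clears GENMASK(7, 3) from master_pipes, leaving
 * 0b0101 intact, and fls() then selects bit 2, i.e. pipe C, as the
 * master.
 */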

static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	enum pipe master_pipe, next_master_pipe;

	master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);

	if ((master_pipes & BIT(master_pipe)) == 0)
		return 0;

	/* ignore our master pipe and everything below it */
	master_pipes &= ~GENMASK(master_pipe, 0);
	/* make sure a high bit is set for the ffs() */
	master_pipes |= BIT(7);
	/* lowest remaining bit should be the next master pipe */
	next_master_pipe = ffs(master_pipes) - 1;

	return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
}
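/*
 * Continuing the hypothetical example above: for pipe C with
 * master_pipes = 0b0101 and slave_pipes = 0b1010, the master pipe is
 * C itself (bit 2). No higher master bit remains after masking, so
 * the BIT(7) sentinel makes next_master_pipe = 7, and the result is
 * slave_pipes & GENMASK(6, 2) = 0b1000, i.e. pipe D is C's only
 * slave.
 */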

static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
{
	u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);

	if (DISPLAY_VER(i915) >= 11)
		panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	return panel_transcoder_mask;
}

static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 master_pipes, slave_pipes;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in the always-on power well).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* single pipe or bigjoiner master */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	/* bigjoiner slave -> consider the master pipe's transcoder as well */
	enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
	if (slave_pipes & BIT(crtc->pipe)) {
		cpu_transcoder = (enum transcoder)
			get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	return enabled_transcoders;
}

static bool has_edp_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & BIT(TRANSCODER_EDP);
}

static bool has_dsi_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
				      BIT(TRANSCODER_DSI_1));
}

static bool has_pipe_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
				       BIT(TRANSCODER_DSI_0) |
				       BIT(TRANSCODER_DSI_1));
}

static void assert_enabled_transcoders(struct drm_i915_private *i915,
				       u8 enabled_transcoders)
{
	/* Only one type of transcoder please */
	drm_WARN_ON(&i915->drm,
		    has_edp_transcoders(enabled_transcoders) +
		    has_dsi_transcoders(enabled_transcoders) +
		    has_pipe_transcoders(enabled_transcoders) > 1);

	/* Only DSI transcoders can be ganged */
	drm_WARN_ON(&i915->drm,
		    !has_dsi_transcoders(enabled_transcoders) &&
		    !is_power_of_2(enabled_transcoders));
}
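/*
 * Illustrative cases for the assertions above: BIT(TRANSCODER_DSI_0) |
 * BIT(TRANSCODER_DSI_1) passes, since ganged DSI is the one allowed
 * multi-bit combination; BIT(TRANSCODER_A) | BIT(TRANSCODER_B) trips
 * the is_power_of_2() check; and BIT(TRANSCODER_EDP) |
 * BIT(TRANSCODER_A) trips the one-type-only check.
 */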

static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(dev_priv, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}

static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	u8 master_pipes, slave_pipes;
	enum pipe pipe = crtc->pipe;

	enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);

	if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
		return;

	crtc_state->bigjoiner_pipes =
		BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
		get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
}

static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	intel_dsc_get_config(pipe_config);
	intel_bigjoiner_get_config(pipe_config);

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	hsw_ips_get_config(pipe_config);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
				    MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
				    CHICKEN_TRANS(pipe_config->cpu_transcoder));

		pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
	} else {
		/* no idea if this is correct */
		pipe_config->framestart_delay = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}

bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
		return false;

	crtc_state->hw.active = true;

	intel_crtc_readout_derived_state(crtc_state);

	return true;
}

/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}

int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct intel_crtc *possible_crtc;
	struct intel_crtc *crtc = NULL;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = to_intel_crtc(connector->state->crtc);

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_intel_crtc(dev, possible_crtc) {
		if (!(encoder->base.possible_crtcs &
		      drm_crtc_mask(&possible_crtc->base)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->base.state->enable) {
			drm_modeset_unlock(&possible_crtc->base.mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, &crtc->base);
	if (ret)
		goto fail;

	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_crtc_wait_for_next_vblank(crtc);

	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	if (ret == -EDEADLK)
		return ret;

	return false;
}

void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		drm_dbg_kms(&i915->drm,
			    "Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}

static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->display.vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev_priv))
		return 120000;
	else if (DISPLAY_VER(dev_priv) != 2)
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
			 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(dev_priv) != 2) {
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		enum pipe lvds_pipe;

		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
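/*
 * For orientation (a sketch of the helper, not a Bspec definition):
 * i9xx_calc_dpll_params() computes m = 5 * (m1 + 2) + (m2 + 2),
 * vco = refclk * m / (n + 2) and dot clock = vco / (p1 * p2), so the
 * raw register fields decoded above get biased before use.
 */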

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link M/N gives the dotclock more directly:
	 * pixel_clock = (link_m * link_freq) / link_n
	 */

	if (!m_n->link_n)
		return 0;

	return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
				m_n->link_n);
}
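/*
 * Hypothetical example: with link_m = 11 and link_n = 20 read back
 * from the hardware on a 270000 kHz link,
 * DIV_ROUND_UP(11 * 270000, 20) = 148500 kHz of dotclock.
 */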

int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
{
	int dotclock;

	if (intel_crtc_has_dp_encoder(pipe_config))
		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
						    &pipe_config->dp_m_n);
	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
		dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
					     pipe_config->pipe_bpp);
	else
		dotclock = pipe_config->port_clock;

	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
	    !intel_crtc_has_dp_encoder(pipe_config))
		dotclock *= 2;

	if (pipe_config->pixel_multiplier)
		dotclock /= pipe_config->pixel_multiplier;

	return dotclock;
}

/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_crtc_for_pipe(dev_priv, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	if (!intel_crtc_get_pipe_config(crtc_state)) {
		kfree(crtc_state);
		kfree(mode);
		return NULL;
	}

	intel_encoder_get_config(encoder, crtc_state);

	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);

	kfree(crtc_state);

	return mode;
}

static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

static bool check_single_encoder_cloning(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}

static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
			crtc_state->data_rate[plane->id] = 0;
			crtc_state->rel_data_rate[plane->id] = 0;
		}

		plane_state->planar_slave = false;
	}

	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		crtc_state->data_rate[linked->id] =
			crtc_state->data_rate_y[plane->id];
		crtc_state->rel_data_rate[linked->id] =
			crtc_state->rel_data_rate_y[plane->id];
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		linked_state->decrypt = plane_state->decrypt;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}

static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}

static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					pipe_mode->crtc_clock);

	return min(linetime_wm, 0x1ff);
}
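/*
 * The linetime watermark is the duration of one scanline in 1/8 us
 * units: crtc_htotal / crtc_clock, scaled by 1000 (kHz) and 8.
 * Hypothetical example: htotal = 2200 at a 148500 kHz dotclock gives
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, i.e. ~14.8 us per
 * line, capped at the 9-bit maximum of 0x1ff.
 */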
4760
hsw_ips_linetime_wm(const struct intel_crtc_state * crtc_state,const struct intel_cdclk_state * cdclk_state)4761 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
4762 const struct intel_cdclk_state *cdclk_state)
4763 {
4764 const struct drm_display_mode *pipe_mode =
4765 &crtc_state->hw.pipe_mode;
4766 int linetime_wm;
4767
4768 if (!crtc_state->hw.enable)
4769 return 0;
4770
	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					cdclk_state->logical.cdclk);

	return min(linetime_wm, 0x1ff);
}

static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
				   crtc_state->pixel_rate);

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    skl_watermark_ipc_enabled(dev_priv))
		linetime_wm /= 2;

	return min(linetime_wm, 0x1ff);
}

static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (DISPLAY_VER(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}

static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (mode_changed) {
		ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = intel_compute_pipe_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");
		return ret;
	}

	/*
	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state. We can program these
	 * immediately.
	 */
	ret = intel_compute_intermediate_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");
		return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_ips_compute_config(state, crtc);
		if (ret)
			return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;
	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}

static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
		      struct intel_crtc_state *crtc_state)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	const struct drm_display_info *info = &connector->display_info;
	int bpp;

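	/*
	 * pipe_bpp is three times the per-channel bpc; round the
	 * connector's max_bpc down to the nearest bpc the pipe
	 * supports (6, 8, 10 or 12).
	 */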
	switch (conn_state->max_bpc) {
	case 6 ... 7:
		bpp = 6 * 3;
		break;
	case 8 ... 9:
		bpp = 8 * 3;
		break;
	case 10 ... 11:
		bpp = 10 * 3;
		break;
	case 12 ... 16:
		bpp = 12 * 3;
		break;
	default:
		MISSING_CASE(conn_state->max_bpc);
		return -EINVAL;
	}

	if (bpp < crtc_state->pipe_bpp) {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] Limiting display bpp to %d "
			    "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
			    connector->base.id, connector->name,
			    bpp, 3 * info->bpc,
			    3 * conn_state->max_requested_bpc,
			    crtc_state->pipe_bpp);

		crtc_state->pipe_bpp = bpp;
	}

	return 0;
}

static int
compute_baseline_pipe_bpp(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	     IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (DISPLAY_VER(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;

	crtc_state->pipe_bpp = bpp;

	/* Clamp display bpp to connector max bpp */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		int ret;

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = compute_sink_pipe_bpp(connector_state, crtc_state);
		if (ret)
			return ret;
	}

	return 0;
}

static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			fallthrough;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}

static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));

	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
				  crtc_state->uapi.degamma_lut);
	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
				  crtc_state->uapi.gamma_lut);
	drm_property_replace_blob(&crtc_state->hw.ctm,
				  crtc_state->uapi.ctm);
}

static void
intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));

	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	drm_mode_copy(&crtc_state->hw.mode,
		      &crtc_state->uapi.mode);
	drm_mode_copy(&crtc_state->hw.adjusted_mode,
		      &crtc_state->uapi.adjusted_mode);
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
}

static void
copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
				    struct intel_crtc *slave_crtc)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
	const struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);

	drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
				  master_crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
				  master_crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&slave_crtc_state->hw.ctm,
				  master_crtc_state->hw.ctm);

	slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
}

static int
copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
				  struct intel_crtc *slave_crtc)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
	const struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc_state *saved_state;

	WARN_ON(master_crtc_state->bigjoiner_pipes !=
		slave_crtc_state->bigjoiner_pipes);

	saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* preserve some things from the slave's original crtc state */
	saved_state->uapi = slave_crtc_state->uapi;
	saved_state->scaler_state = slave_crtc_state->scaler_state;
	saved_state->shared_dpll = slave_crtc_state->shared_dpll;
	saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
	saved_state->crc_enabled = slave_crtc_state->crc_enabled;

	intel_crtc_free_hw_state(slave_crtc_state);
	memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
	slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
	slave_crtc_state->hw.active = master_crtc_state->hw.active;
	drm_mode_copy(&slave_crtc_state->hw.mode,
		      &master_crtc_state->hw.mode);
	drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
		      &master_crtc_state->hw.pipe_mode);
	drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
		      &master_crtc_state->hw.adjusted_mode);
	slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;

	copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);

	slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
	slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
	slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;

	WARN_ON(master_crtc_state->bigjoiner_pipes !=
		slave_crtc_state->bigjoiner_pipes);

	return 0;
}

static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known not to cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);

	return 0;
}

static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int pipe_src_w, pipe_src_h;
	int base_bpp, ret, i;
	bool retry = true;

	crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;

	crtc_state->framestart_delay = 1;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive nor negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(state, crtc);
	if (ret)
		return ret;

	base_bpp = crtc_state->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&crtc_state->hw.mode,
			       &pipe_src_w, &pipe_src_h);
	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      pipe_src_w, pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		if (!check_single_encoder_cloning(state, crtc, encoder)) {
			drm_dbg_kms(&i915->drm,
				    "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
				    encoder->base.base.id, encoder->base.name);
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			crtc_state->output_types |=
				BIT(encoder->compute_output_type(encoder, crtc_state,
								 connector_state));
		else
			crtc_state->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	crtc_state->port_clock = 0;
	crtc_state->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = encoder->compute_config(encoder, crtc_state,
					      connector_state);
		if (ret == -EDEADLK)
			return ret;
		if (ret < 0) {
			drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
				    encoder->base.base.id, encoder->base.name, ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!crtc_state->port_clock)
		crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
			* crtc_state->pixel_multiplier;

	ret = intel_crtc_compute_config(state, crtc);
	if (ret == -EDEADLK)
		return ret;
	if (ret == -EAGAIN) {
		if (drm_WARN(&i915->drm, !retry,
			     "[CRTC:%d:%s] loop in pipe configuration computation\n",
			     crtc->base.base.id, crtc->base.name))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n",
			    crtc->base.base.id, crtc->base.name);
		retry = false;
		goto encoder_retry;
	}
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
			    crtc->base.base.id, crtc->base.name, ret);
		return ret;
	}

	/* Dithering seems not to pass bits through correctly when it should,
	 * so only enable it on 6bpc panels and when it's not a compliance
	 * test requesting a 6bpc video pattern.
	 */
	crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
		!crtc_state->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    crtc->base.base.id, crtc->base.name,
		    base_bpp, crtc_state->pipe_bpp, crtc_state->dither);

	return 0;
}

static int
intel_modeset_pipe_config_late(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	intel_bigjoiner_adjust_pipe_src(crtc_state);

	for_each_new_connector_in_state(&state->base, connector,
					conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);
		int ret;

		if (conn_state->crtc != &crtc->base ||
		    !encoder->compute_config_late)
			continue;

		ret = encoder->compute_config_late(encoder, crtc_state,
						   conn_state);
		if (ret)
			return ret;
	}

	return 0;
}

bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

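	/*
	 * Accept the clocks as "fuzzily" equal when their difference is
	 * below 5% of their sum: (diff + c1 + c2) / (c1 + c2) < 1.05 is
	 * just diff < 0.05 * (c1 + c2) in integer form. E.g. the
	 * 1000/1001 pair 148500 vs 148352 kHz passes (diff 148,
	 * limit ~14842).
	 */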
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}

static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       const struct intel_link_m_n *m2_n2)
{
	return m_n->tu == m2_n2->tu &&
		m_n->data_m == m2_n2->data_m &&
		m_n->data_n == m2_n2->data_n &&
		m_n->link_m == m2_n2->link_m &&
		m_n->link_n == m2_n2->link_n;
}

static bool
intel_compare_infoframe(const union hdmi_infoframe *a,
			const union hdmi_infoframe *b)
{
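	/*
	 * Comparing the whole union with memcmp() implicitly assumes both
	 * copies were zero-initialized, so that padding and bytes unused
	 * by the active infoframe type compare equal as well.
	 */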
	return memcmp(a, b, sizeof(*a)) == 0;
}

static bool
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
			 const struct drm_dp_vsc_sdp *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&dev_priv->drm,
			    "fastset mismatch in %s infoframe\n", name);
		drm_dbg_kms(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg_kms(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
		drm_err(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}

static void
pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
				bool fastset, const char *name,
				const struct drm_dp_vsc_sdp *a,
				const struct drm_dp_vsc_sdp *b)
{
	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&dev_priv->drm,
			    "fastset mismatch in %s dp sdp\n", name);
		drm_dbg_kms(&dev_priv->drm, "expected:\n");
		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg_kms(&dev_priv->drm, "found:\n");
		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
		drm_err(&dev_priv->drm, "expected:\n");
		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err(&dev_priv->drm, "found:\n");
		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}

static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}

static bool fastboot_enabled(struct drm_i915_private *dev_priv)
{
	if (dev_priv->params.fastboot != -1)
		return dev_priv->params.fastboot;

	/* Enable fastboot by default on Skylake and newer */
	if (DISPLAY_VER(dev_priv) >= 9)
		return true;

	/* Enable fastboot by default on VLV and CHV */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return true;

	/* Disabled by default on all others */
	return false;
}

bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;
	u32 bp_gamma = 0;
	bool fixup_inherited = fastset &&
		current_config->inherited && !pipe_config->inherited;

	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm,
			    "initial modeset and fastboot not set\n");
		ret = false;
	}

#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %s, found %s)", \
				     str_yes_no(current_config->name), \
				     str_yes_no(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
				     str_yes_no(current_config->name), \
				     str_yes_no(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i data %i/%i link %i/%i, " \
				     "found tu %i, data %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.data_m, \
				     current_config->name.data_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.data_m, \
				     pipe_config->name.data_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_TIMINGS(name) do { \
	PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
	PIPE_CONF_CHECK_I(name.crtc_htotal); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
	PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
	PIPE_CONF_CHECK_I(name.crtc_vtotal); \
	PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
} while (0)

#define PIPE_CONF_CHECK_RECT(name) do { \
	PIPE_CONF_CHECK_I(name.x1); \
	PIPE_CONF_CHECK_I(name.x2); \
	PIPE_CONF_CHECK_I(name.y1); \
	PIPE_CONF_CHECK_I(name.y2); \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i data %i/%i link %i/%i, " \
				     "or tu %i data %i/%i link %i/%i, " \
				     "found tu %i, data %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.data_m, \
				     current_config->name.data_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.data_m, \
				     current_config->alt_name.data_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.data_m, \
				     pipe_config->name.data_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!current_config->has_psr && !pipe_config->has_psr && \
	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
				     "(expected %i, found %i, won't compare lut values)", \
				     current_config->name1, \
				     pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					   pipe_config->name2, pipe_config->name1, \
					   bit_precision)) { \
			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
					     "hw_state doesn't match sw_state"); \
			ret = false; \
		} \
	} \
} while (0)

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(hw.enable);
	PIPE_CONF_CHECK_I(hw.active);

	PIPE_CONF_CHECK_I(cpu_transcoder);
	PIPE_CONF_CHECK_I(mst_master_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
		if (!fastset || !pipe_config->seamless_m_n)
			PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
	} else {
		PIPE_CONF_CHECK_M_N(dp_m_n);
		PIPE_CONF_CHECK_M_N(dp_m2_n2);
	}

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(framestart_delay);
	PIPE_CONF_CHECK_I(msa_timing_delay);

	PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
	PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);

	PIPE_CONF_CHECK_I(pixel_multiplier);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (DISPLAY_VER(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	if (!fastset) {
		PIPE_CONF_CHECK_RECT(pipe_src);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		PIPE_CONF_CHECK_RECT(pch_pfit.dst);

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_I(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);

		PIPE_CONF_CHECK_I(linetime);
		PIPE_CONF_CHECK_I(ips_linetime);

		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
		if (bp_gamma)
			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);

		if (current_config->active_planes) {
			PIPE_CONF_CHECK_BOOL(has_psr);
			PIPE_CONF_CHECK_BOOL(has_psr2);
			PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
			PIPE_CONF_CHECK_I(dc3co_exitline);
		}
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	if (dev_priv->display.dpll.mgr) {
		PIPE_CONF_CHECK_P(shared_dpll);

		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
		PIPE_CONF_CHECK_X(dpll_hw_state.div0);
		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
	}

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	if (!fastset || !pipe_config->seamless_m_n) {
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
	}
	PIPE_CONF_CHECK_I(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	if (current_config->has_psr || pipe_config->has_psr)
		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
	else
		PIPE_CONF_CHECK_X(infoframes.enable);

	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);
	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);

	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);
	PIPE_CONF_CHECK_X(bigjoiner_pipes);

	PIPE_CONF_CHECK_I(dsc.compression_enable);
	PIPE_CONF_CHECK_I(dsc.dsc_split);
	PIPE_CONF_CHECK_I(dsc.compressed_bpp);

	PIPE_CONF_CHECK_BOOL(splitter.enable);
	PIPE_CONF_CHECK_I(splitter.link_count);
	PIPE_CONF_CHECK_I(splitter.pixel_overlap);

	PIPE_CONF_CHECK_BOOL(vrr.enable);
	PIPE_CONF_CHECK_I(vrr.vmin);
	PIPE_CONF_CHECK_I(vrr.vmax);
	PIPE_CONF_CHECK_I(vrr.flipline);
	PIPE_CONF_CHECK_I(vrr.pipeline_full);
	PIPE_CONF_CHECK_I(vrr.guardband);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_CHECK_TIMINGS
#undef PIPE_CONF_CHECK_RECT
#undef PIPE_CONF_QUIRK

	return ret;
}

static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->planar_slave ||
			     plane_state->uapi.visible);
}

int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	/*
	 * Add all pipes to the state, and force
	 * a modeset on all the active ones.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.active ||
		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		crtc_state->update_planes |= crtc_state->active_planes;
	}

	return 0;
}

void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode adjusted_mode;

	drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);

	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. I.e. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (DISPLAY_VER(dev_priv) == 2) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtcs that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtcs are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}

static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	state->modeset = true;

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}

static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
		return;

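	/*
	 * The states match closely enough for a fastset: drop the
	 * full-modeset request and flag the pipe for an update instead.
	 */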
6140 new_crtc_state->uapi.mode_changed = false;
6141 new_crtc_state->update_pipe = true;
6142 }
6143
intel_crtc_add_planes_to_state(struct intel_atomic_state * state,struct intel_crtc * crtc,u8 plane_ids_mask)6144 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
6145 struct intel_crtc *crtc,
6146 u8 plane_ids_mask)
6147 {
6148 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6149 struct intel_plane *plane;
6150
6151 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6152 struct intel_plane_state *plane_state;
6153
6154 if ((plane_ids_mask & BIT(plane->id)) == 0)
6155 continue;
6156
6157 plane_state = intel_atomic_get_plane_state(state, plane);
6158 if (IS_ERR(plane_state))
6159 return PTR_ERR(plane_state);
6160 }
6161
6162 return 0;
6163 }
6164
intel_atomic_add_affected_planes(struct intel_atomic_state * state,struct intel_crtc * crtc)6165 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
6166 struct intel_crtc *crtc)
6167 {
6168 const struct intel_crtc_state *old_crtc_state =
6169 intel_atomic_get_old_crtc_state(state, crtc);
6170 const struct intel_crtc_state *new_crtc_state =
6171 intel_atomic_get_new_crtc_state(state, crtc);
6172
6173 return intel_crtc_add_planes_to_state(state, crtc,
6174 old_crtc_state->enabled_planes |
6175 new_crtc_state->enabled_planes);
6176 }
6177
active_planes_affects_min_cdclk(struct drm_i915_private * dev_priv)6178 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
6179 {
6180 /* See {hsw,vlv,ivb}_plane_ratio() */
6181 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
6182 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
6183 IS_IVYBRIDGE(dev_priv);
6184 }
6185
intel_crtc_add_bigjoiner_planes(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_crtc * other)6186 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
6187 struct intel_crtc *crtc,
6188 struct intel_crtc *other)
6189 {
6190 const struct intel_plane_state *plane_state;
6191 struct intel_plane *plane;
6192 u8 plane_ids = 0;
6193 int i;
6194
6195 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6196 if (plane->pipe == crtc->pipe)
6197 plane_ids |= BIT(plane->id);
6198 }
6199
6200 return intel_crtc_add_planes_to_state(state, other, plane_ids);
6201 }
6202
intel_bigjoiner_add_affected_planes(struct intel_atomic_state * state)6203 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
6204 {
6205 struct drm_i915_private *i915 = to_i915(state->base.dev);
6206 const struct intel_crtc_state *crtc_state;
6207 struct intel_crtc *crtc;
6208 int i;
6209
6210 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6211 struct intel_crtc *other;
6212
6213 for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
6214 crtc_state->bigjoiner_pipes) {
6215 int ret;
6216
6217 if (crtc == other)
6218 continue;
6219
6220 ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
6221 if (ret)
6222 return ret;
6223 }
6224 }
6225
6226 return 0;
6227 }
6228
intel_atomic_check_planes(struct intel_atomic_state * state)6229 static int intel_atomic_check_planes(struct intel_atomic_state *state)
6230 {
6231 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6232 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6233 struct intel_plane_state *plane_state;
6234 struct intel_plane *plane;
6235 struct intel_crtc *crtc;
6236 int i, ret;
6237
6238 ret = icl_add_linked_planes(state);
6239 if (ret)
6240 return ret;
6241
6242 ret = intel_bigjoiner_add_affected_planes(state);
6243 if (ret)
6244 return ret;
6245
6246 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6247 ret = intel_plane_atomic_check(state, plane);
6248 if (ret) {
6249 drm_dbg_atomic(&dev_priv->drm,
6250 "[PLANE:%d:%s] atomic driver check failed\n",
6251 plane->base.base.id, plane->base.name);
6252 return ret;
6253 }
6254 }
6255
6256 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6257 new_crtc_state, i) {
6258 u8 old_active_planes, new_active_planes;
6259
6260 ret = icl_check_nv12_planes(new_crtc_state);
6261 if (ret)
6262 return ret;
6263
6264 /*
6265 * On some platforms the number of active planes affects
6266 * the planes' minimum cdclk calculation. Add such planes
6267 * to the state before we compute the minimum cdclk.
6268 */
6269 if (!active_planes_affects_min_cdclk(dev_priv))
6270 continue;
6271
6272 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
6273 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
6274
6275 if (hweight8(old_active_planes) == hweight8(new_active_planes))
6276 continue;
6277
6278 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
6279 if (ret)
6280 return ret;
6281 }
6282
6283 return 0;
6284 }
6285
intel_atomic_check_crtcs(struct intel_atomic_state * state)6286 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
6287 {
6288 struct intel_crtc_state *crtc_state;
6289 struct intel_crtc *crtc;
6290 int i;
6291
6292 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6293 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6294 int ret;
6295
6296 ret = intel_crtc_atomic_check(state, crtc);
6297 if (ret) {
6298 drm_dbg_atomic(&i915->drm,
6299 "[CRTC:%d:%s] atomic driver check failed\n",
6300 crtc->base.base.id, crtc->base.name);
6301 return ret;
6302 }
6303 }
6304
6305 return 0;
6306 }
6307
intel_cpu_transcoders_need_modeset(struct intel_atomic_state * state,u8 transcoders)6308 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
6309 u8 transcoders)
6310 {
6311 const struct intel_crtc_state *new_crtc_state;
6312 struct intel_crtc *crtc;
6313 int i;
6314
6315 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6316 if (new_crtc_state->hw.enable &&
6317 transcoders & BIT(new_crtc_state->cpu_transcoder) &&
6318 intel_crtc_needs_modeset(new_crtc_state))
6319 return true;
6320 }
6321
6322 return false;
6323 }
6324
intel_pipes_need_modeset(struct intel_atomic_state * state,u8 pipes)6325 static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
6326 u8 pipes)
6327 {
6328 const struct intel_crtc_state *new_crtc_state;
6329 struct intel_crtc *crtc;
6330 int i;
6331
6332 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6333 if (new_crtc_state->hw.enable &&
6334 pipes & BIT(crtc->pipe) &&
6335 intel_crtc_needs_modeset(new_crtc_state))
6336 return true;
6337 }
6338
6339 return false;
6340 }
6341
intel_atomic_check_bigjoiner(struct intel_atomic_state * state,struct intel_crtc * master_crtc)6342 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
6343 struct intel_crtc *master_crtc)
6344 {
6345 struct drm_i915_private *i915 = to_i915(state->base.dev);
6346 struct intel_crtc_state *master_crtc_state =
6347 intel_atomic_get_new_crtc_state(state, master_crtc);
6348 struct intel_crtc *slave_crtc;
6349
6350 if (!master_crtc_state->bigjoiner_pipes)
6351 return 0;
6352
6353 /* sanity check */
6354 if (drm_WARN_ON(&i915->drm,
6355 master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
6356 return -EINVAL;
6357
6358 if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
6359 drm_dbg_kms(&i915->drm,
6360 "[CRTC:%d:%s] Cannot act as big joiner master "
6361 "(need 0x%x as pipes, only 0x%x possible)\n",
6362 master_crtc->base.base.id, master_crtc->base.name,
6363 master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
6364 return -EINVAL;
6365 }
6366
6367 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
6368 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
6369 struct intel_crtc_state *slave_crtc_state;
6370 int ret;
6371
6372 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
6373 if (IS_ERR(slave_crtc_state))
6374 return PTR_ERR(slave_crtc_state);
6375
6376 /* master being enabled, slave was already configured? */
6377 if (slave_crtc_state->uapi.enable) {
6378 drm_dbg_kms(&i915->drm,
6379 "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
6380 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
6381 slave_crtc->base.base.id, slave_crtc->base.name,
6382 master_crtc->base.base.id, master_crtc->base.name);
6383 return -EINVAL;
6384 }
6385
6386 /*
6387 * The state copy logic assumes the master crtc gets processed
6388 * before the slave crtc during the main compute_config loop.
6389 * This works because the crtcs are created in pipe order,
6390 * and the hardware requires master pipe < slave pipe as well.
6391 * Should that change we need to rethink the logic.
6392 */
6393 if (WARN_ON(drm_crtc_index(&master_crtc->base) >
6394 drm_crtc_index(&slave_crtc->base)))
6395 return -EINVAL;
6396
6397 drm_dbg_kms(&i915->drm,
6398 "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
6399 slave_crtc->base.base.id, slave_crtc->base.name,
6400 master_crtc->base.base.id, master_crtc->base.name);
6401
6402 slave_crtc_state->bigjoiner_pipes =
6403 master_crtc_state->bigjoiner_pipes;
6404
6405 ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
6406 if (ret)
6407 return ret;
6408 }
6409
6410 return 0;
6411 }

static void kill_bigjoiner_slave(struct intel_atomic_state *state,
				 struct intel_crtc *master_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc *slave_crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
		struct intel_crtc_state *slave_crtc_state =
			intel_atomic_get_new_crtc_state(state, slave_crtc);

		slave_crtc_state->bigjoiner_pipes = 0;

		intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
	}

	master_crtc_state->bigjoiner_pipes = 0;
}

/**
 * DOC: asynchronous flip implementation
 *
 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
 * Correspondingly, support is currently added for the primary plane only.
 *
 * Async flip can only change the plane surface address, so anything else
 * changing is rejected from the intel_async_flip_check_hw() function.
 * Once this check is cleared, the flip done interrupt is enabled using
 * the intel_crtc_enable_flip_done() function.
 *
 * As soon as the surface address register is written, the flip done interrupt
 * is generated and the requested events are sent to userspace in the interrupt
 * handler itself. The timestamp and sequence sent during the flip done event
 * correspond to the last vblank and have no relation to the actual time when
 * the flip done event was sent.
 */
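
/*
 * For reference, a minimal userspace sketch (illustrative only, with error
 * handling omitted; fd/crtc_id/fb_id are assumed to exist) of how this path
 * is exercised via libdrm:
 *
 *	#include <xf86drm.h>
 *	#include <xf86drmMode.h>
 *
 *	// Flip crtc_id to fb_id without waiting for vblank; completion is
 *	// signalled to userspace through a DRM_EVENT_FLIP_COMPLETE event.
 *	drmModePageFlip(fd, crtc_id, fb_id,
 *			DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
 *			user_data);
 */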
static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->uapi.active) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * TODO: Async flip is only supported through the page flip
		 * IOCTL as of now, so support is currently added for the
		 * primary plane only. Support for other planes on platforms
		 * that support this (vlv/chv and icl+) should be added when
		 * async flip is enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] async flip not supported\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] no old or new framebuffer\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

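/*
 * Note: intel_async_flip_check_uapi() above runs early against the uapi
 * state, while the function below runs near the end of intel_atomic_check()
 * against the computed hw state, once properties such as rotation and pixel
 * format have been resolved into hw.* and do_async_flip has been determined.
 */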
static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	int i;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->hw.active) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Active planes cannot be changed during async flip\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * Only async flip capable planes should be in the state
		 * if we're really about to ask the hardware to perform
		 * an async flip. We should never get this far otherwise.
		 */
		if (drm_WARN_ON(&i915->drm,
				new_crtc_state->do_async_flip && !plane->async_flip))
			return -EINVAL;

		/*
		 * Only check async flip capable planes; other planes
		 * may be involved in the initial commit due to
		 * the wm0/ddb optimization.
		 *
		 * TODO: maybe we should track which planes were actually
		 * requested to do the async flip...
		 */
		if (!plane->async_flip)
			continue;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
		case I915_FORMAT_MOD_4_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Modifier does not support async flips\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (new_plane_state->hw.fb->format->num_planes > 1) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Planar formats do not support async flips\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->view.color_plane[0].mapping_stride !=
		    new_plane_state->view.color_plane[0].mapping_stride) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Size/coordinates cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Alpha value cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		/* plane decryption is allowed to change only in synchronous flips */
		if (old_plane_state->decrypt != new_plane_state->decrypt) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	u8 affected_pipes = 0;
	u8 modeset_pipes = 0;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		affected_pipes |= crtc_state->bigjoiner_pipes;
		if (intel_crtc_needs_modeset(crtc_state))
			modeset_pipes |= crtc_state->bigjoiner_pipes;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
		int ret;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    intel_crtc_is_bigjoiner_master(crtc_state))
			kill_bigjoiner_slave(state, crtc);
	}

	return 0;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @_state: state to validate
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;
	bool any_ms = false;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->inherited != old_crtc_state->inherited)
			new_crtc_state->uapi.mode_changed = true;

		if (new_crtc_state->uapi.scaling_filter !=
		    old_crtc_state->uapi.scaling_filter)
			new_crtc_state->uapi.mode_changed = true;
	}

	intel_vrr_check_modeset(state);

	ret = drm_atomic_helper_check_modeset(dev, &state->base);
	if (ret)
		goto fail;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = intel_async_flip_check_uapi(state, crtc);
		if (ret)
			return ret;
	}

	ret = intel_bigjoiner_add_affected_crtcs(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
				copy_bigjoiner_crtc_state_nomodeset(state, crtc);
			else
				intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
			continue;
		}

		if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
			drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
			continue;
		}

		ret = intel_crtc_prepare_cleared_state(state, crtc);
		if (ret)
			goto fail;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config(state, crtc);
		if (ret)
			goto fail;

		ret = intel_atomic_check_bigjoiner(state, crtc);
		if (ret)
			goto fail;
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (new_crtc_state->hw.enable) {
			ret = intel_modeset_pipe_config_late(state, crtc);
			if (ret)
				goto fail;
		}

		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
	}

	/*
	 * Check if a fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a full modeset when the MST master
	 * transcoder did not change but the pipe of the master transcoder
	 * needs a full modeset, in which case all slaves also need to do a
	 * full modeset. Likewise, in the case of port synced crtcs, if one
	 * of the synced crtcs needs a full modeset, all the other synced
	 * crtcs should be forced to do a full modeset as well.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
			enum transcoder master = new_crtc_state->mst_master_transcoder;

			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
				new_crtc_state->uapi.mode_changed = true;
				new_crtc_state->update_pipe = false;
			}
		}

		if (is_trans_port_sync_mode(new_crtc_state)) {
			u8 trans = new_crtc_state->sync_mode_slaves_mask;

			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
				trans |= BIT(new_crtc_state->master_transcoder);

			if (intel_cpu_transcoders_need_modeset(state, trans)) {
				new_crtc_state->uapi.mode_changed = true;
				new_crtc_state->update_pipe = false;
			}
		}

		if (new_crtc_state->bigjoiner_pipes) {
			if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
				new_crtc_state->uapi.mode_changed = true;
				new_crtc_state->update_pipe = false;
			}
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		any_ms = true;

		intel_release_shared_dplls(state, crtc);
	}

	if (any_ms && !check_digital_port_conflicts(state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "rejecting conflicting digital port configuration\n");
		ret = -EINVAL;
		goto fail;
	}

	ret = drm_dp_mst_atomic_check(&state->base);
	if (ret)
		goto fail;

	ret = intel_atomic_check_planes(state);
	if (ret)
		goto fail;

	ret = intel_compute_global_watermarks(state);
	if (ret)
		goto fail;

	ret = intel_bw_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_cdclk_atomic_check(state, &any_ms);
	if (ret)
		goto fail;

	if (intel_any_crtc_needs_modeset(state))
		any_ms = true;

	if (any_ms) {
		ret = intel_modeset_checks(state);
		if (ret)
			goto fail;

		ret = intel_modeset_calc_cdclk(state);
		if (ret)
			return ret;
	}

	ret = intel_atomic_check_crtcs(state);
	if (ret)
		goto fail;

	ret = intel_fbc_atomic_check(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = intel_async_flip_check_hw(state, crtc);
		if (ret)
			goto fail;

		if (!intel_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->update_pipe)
			continue;

		intel_crtc_state_dump(new_crtc_state, state,
				      intel_crtc_needs_modeset(new_crtc_state) ?
				      "modeset" : "fastset");
	}

	return 0;

fail:
	if (ret == -EDEADLK)
		return ret;

	/*
	 * FIXME would probably be nice to know which crtc specifically
	 * caused the failure, in cases where we can pinpoint it.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_crtc_state_dump(new_crtc_state, state, "failed");

	return ret;
}

static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
	if (ret < 0)
		return ret;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		bool mode_changed = intel_crtc_needs_modeset(crtc_state);

		if (mode_changed || crtc_state->update_pipe ||
		    crtc_state->uapi.color_mgmt_changed) {
			intel_dsb_prepare(crtc_state);
		}
	}

	return 0;
}

void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}

static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjusted pixel rate, so pfit changes do
	 * affect it and thus it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (new_crtc_state->seamless_m_n)
		intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
					       &new_crtc_state->dp_m_n);
}

static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit_arm(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	intel_psr2_program_trans_man_trk_ctl(new_crtc_state);

	intel_atomic_update_watermarks(state, crtc);
}

static void commit_pipe_post_planes(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/*
	 * Disable the scaler(s) after the plane(s) so that we don't
	 * get a catastrophic underrun even if the two operations
	 * end up happening in two different frames.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 &&
	    !intel_crtc_needs_modeset(new_crtc_state))
		skl_detach_scalers(new_crtc_state);
}

static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return;

	intel_crtc_update_active_timings(new_crtc_state);

	dev_priv->display.funcs.display->crtc_enable(state, crtc);

	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
		return;

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}

static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);

		if (DISPLAY_VER(i915) >= 11 &&
		    new_crtc_state->update_pipe)
			icl_set_pipe_chicken(new_crtc_state);
	}

	intel_fbc_update(state, crtc);

	if (!modeset &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe))
		intel_color_commit_noarm(new_crtc_state);

	intel_crtc_planes_update_noarm(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_pre_planes(state, crtc);

	intel_crtc_planes_update_arm(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets. But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}

static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.funcs.display->crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv))
		intel_initial_watermarks(state, crtc);
}

static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_pre_plane_update(state, crtc);
		intel_crtc_disable_planes(state, crtc);
	}

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/*
		 * In the case of Transcoder Port Sync, master and slave CRTCs
		 * can be assigned in any order, so we need to make sure that
		 * slave CRTCs are disabled first and the master CRTC after,
		 * since slave vblanks are masked until the master's vblank.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state) &&
		    !intel_crtc_is_bigjoiner_slave(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
	}
}

static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_enable_crtc(state, crtc);
		intel_update_crtc(state, crtc);
	}
}

static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for CRTCs that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first let's enable all pipes that do not need a full modeset as
	 * those don't have any external dependency.
	 */
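	/*
	 * Worked example (numbers are illustrative only): if pipe B shrinks
	 * from DDB blocks 512-1023 to 768-1023 while pipe A grows from 0-511
	 * into 0-767, A's new allocation overlaps B's current one. The
	 * overlap check below then defers A until B has been updated and its
	 * new allocation has taken effect after a vblank; the outer while
	 * loop picks A up again on the next pass.
	 */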
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, its DDB has
			 * changed, and this isn't the last pipe that needs
			 * updating, then we need to wait for a vblank to pass
			 * for the new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that need a modeset and do not depend on other
	 * pipes.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    intel_crtc_is_bigjoiner_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}

7369
intel_atomic_helper_free_state(struct drm_i915_private * dev_priv)7370 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
7371 {
7372 struct intel_atomic_state *state, *next;
7373 struct llist_node *freed;
7374
7375 freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
7376 llist_for_each_entry_safe(state, next, freed, freed)
7377 drm_atomic_state_put(&state->base);
7378 }
7379
intel_atomic_helper_free_state_worker(struct work_struct * work)7380 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
7381 {
7382 struct drm_i915_private *dev_priv =
7383 container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
7384
7385 intel_atomic_helper_free_state(dev_priv);
7386 }
7387
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
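	/*
	 * Wait for whichever happens first: the commit fence signalling, or
	 * a modeset-affecting GPU reset beginning. We must wake up for the
	 * reset as well, since the reset path may need this commit to get
	 * out of its way rather than sleep through it.
	 */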
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);

		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}

static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}

static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	intel_cleanup_dsbs(state);
	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);

	intel_atomic_helper_free_state(i915);
}

static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requires this value to be located in the fb at
		 * offset 0 of the cc plane; plane #2 on previous generations,
		 * plane #1 for flat CCS):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *   above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that the FB obj is
		 * pinned and the caller made sure that the object is synced wrt.
		 * the related color clear value GPU write on it.
		 */
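		/*
		 * Byte layout at the start of the CC plane (illustrative),
		 * which is why the read below starts at offset 16:
		 *
		 *	 0..15: 4 x 4-byte per-channel clear values
		 *	16..23: 8-byte native clear color -> plane_state->ccval
		 */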
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[cc_plane] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}

static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);
	drm_dp_mst_atomic_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	intel_atomic_prepare_plane_clear_colors(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {
			intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);
	intel_mbus_dbox_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.funcs.display->commit_modeset_enables(state);

	intel_encoders_update_complete(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously_
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_disable_flip_done(state, crtc);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);
	intel_psr_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work, aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker so as not
	 * to impede the current task (userspace for blocking modesets) that
	 * is executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}

static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}

static int
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		/* we do blocking waits in the worker, nothing to do here */
		break;
	case FENCE_FREE:
	{
		struct intel_atomic_helper *helper =
			&to_i915(state->base.dev)->display.atomic_helper;

		if (llist_add(&state->freed, &helper->free_list))
			schedule_work(&helper->free_work);
		break;
	}
	}

	return NOTIFY_DONE;
}

static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}

static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
	} else {
		if (state->modeset)
			flush_workqueue(dev_priv->display.wq.modeset);
		intel_atomic_commit_tail(state);
	}

	return 0;
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
							      plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}

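/**
 * intel_get_pipe_from_crtc_id_ioctl - map a CRTC id to its hardware pipe
 * @dev: drm device
 * @data: ioctl data (struct drm_i915_get_pipe_from_crtc_id)
 * @file: drm file
 *
 * Userspace usage sketch (illustrative only; fd and crtc_id are assumed
 * to come from the caller):
 *
 *	struct drm_i915_get_pipe_from_crtc_id get = { .crtc_id = crtc_id };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, &get) == 0)
 *		printf("crtc %u is on pipe %u\n", get.crtc_id, get.pipe);
 */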
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
		possible_crtcs |= drm_crtc_mask(&crtc->base);

	return possible_crtcs;
}

static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9)
		return false;

	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->display.vbt.int_crt_support)
		return false;

	return true;
}

intel_setup_outputs(struct drm_i915_private * dev_priv)7896 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
7897 {
7898 struct intel_encoder *encoder;
7899 bool dpd_is_edp = false;
7900
7901 intel_pps_unlock_regs_wa(dev_priv);
7902
7903 if (!HAS_DISPLAY(dev_priv))
7904 return;
7905
7906 if (IS_DG2(dev_priv)) {
7907 intel_ddi_init(dev_priv, PORT_A);
7908 intel_ddi_init(dev_priv, PORT_B);
7909 intel_ddi_init(dev_priv, PORT_C);
7910 intel_ddi_init(dev_priv, PORT_D_XELPD);
7911 intel_ddi_init(dev_priv, PORT_TC1);
7912 } else if (IS_ALDERLAKE_P(dev_priv)) {
7913 intel_ddi_init(dev_priv, PORT_A);
7914 intel_ddi_init(dev_priv, PORT_B);
7915 intel_ddi_init(dev_priv, PORT_TC1);
7916 intel_ddi_init(dev_priv, PORT_TC2);
7917 intel_ddi_init(dev_priv, PORT_TC3);
7918 intel_ddi_init(dev_priv, PORT_TC4);
7919 icl_dsi_init(dev_priv);
7920 } else if (IS_ALDERLAKE_S(dev_priv)) {
7921 intel_ddi_init(dev_priv, PORT_A);
7922 intel_ddi_init(dev_priv, PORT_TC1);
7923 intel_ddi_init(dev_priv, PORT_TC2);
7924 intel_ddi_init(dev_priv, PORT_TC3);
7925 intel_ddi_init(dev_priv, PORT_TC4);
7926 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
7927 intel_ddi_init(dev_priv, PORT_A);
7928 intel_ddi_init(dev_priv, PORT_B);
7929 intel_ddi_init(dev_priv, PORT_TC1);
7930 intel_ddi_init(dev_priv, PORT_TC2);
7931 } else if (DISPLAY_VER(dev_priv) >= 12) {
7932 intel_ddi_init(dev_priv, PORT_A);
7933 intel_ddi_init(dev_priv, PORT_B);
7934 intel_ddi_init(dev_priv, PORT_TC1);
7935 intel_ddi_init(dev_priv, PORT_TC2);
7936 intel_ddi_init(dev_priv, PORT_TC3);
7937 intel_ddi_init(dev_priv, PORT_TC4);
7938 intel_ddi_init(dev_priv, PORT_TC5);
7939 intel_ddi_init(dev_priv, PORT_TC6);
7940 icl_dsi_init(dev_priv);
7941 } else if (IS_JSL_EHL(dev_priv)) {
7942 intel_ddi_init(dev_priv, PORT_A);
7943 intel_ddi_init(dev_priv, PORT_B);
7944 intel_ddi_init(dev_priv, PORT_C);
7945 intel_ddi_init(dev_priv, PORT_D);
7946 icl_dsi_init(dev_priv);
7947 } else if (DISPLAY_VER(dev_priv) == 11) {
7948 intel_ddi_init(dev_priv, PORT_A);
7949 intel_ddi_init(dev_priv, PORT_B);
7950 intel_ddi_init(dev_priv, PORT_C);
7951 intel_ddi_init(dev_priv, PORT_D);
7952 intel_ddi_init(dev_priv, PORT_E);
7953 intel_ddi_init(dev_priv, PORT_F);
7954 icl_dsi_init(dev_priv);
7955 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
7956 intel_ddi_init(dev_priv, PORT_A);
7957 intel_ddi_init(dev_priv, PORT_B);
7958 intel_ddi_init(dev_priv, PORT_C);
7959 vlv_dsi_init(dev_priv);
7960 } else if (DISPLAY_VER(dev_priv) >= 9) {
7961 intel_ddi_init(dev_priv, PORT_A);
7962 intel_ddi_init(dev_priv, PORT_B);
7963 intel_ddi_init(dev_priv, PORT_C);
7964 intel_ddi_init(dev_priv, PORT_D);
7965 intel_ddi_init(dev_priv, PORT_E);
7966 } else if (HAS_DDI(dev_priv)) {
7967 u32 found;
7968
7969 if (intel_ddi_crt_present(dev_priv))
7970 intel_crt_init(dev_priv);
7971
7972 /* Haswell uses DDI functions to detect digital outputs. */
7973 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
7974 if (found)
7975 intel_ddi_init(dev_priv, PORT_A);
7976
7977 found = intel_de_read(dev_priv, SFUSE_STRAP);
7978 if (found & SFUSE_STRAP_DDIB_DETECTED)
7979 intel_ddi_init(dev_priv, PORT_B);
7980 if (found & SFUSE_STRAP_DDIC_DETECTED)
7981 intel_ddi_init(dev_priv, PORT_C);
7982 if (found & SFUSE_STRAP_DDID_DETECTED)
7983 intel_ddi_init(dev_priv, PORT_D);
7984 if (found & SFUSE_STRAP_DDIF_DETECTED)
7985 intel_ddi_init(dev_priv, PORT_F);
7986 } else if (HAS_PCH_SPLIT(dev_priv)) {
7987 int found;
7988
7989 /*
7990 * intel_edp_init_connector() depends on this completing first,
7991 * to prevent the registration of both eDP and LVDS and the
7992 * incorrect sharing of the PPS.
7993 */
7994 intel_lvds_init(dev_priv);
7995 intel_crt_init(dev_priv);
7996
7997 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
7998
7999 if (ilk_has_edp_a(dev_priv))
8000 g4x_dp_init(dev_priv, DP_A, PORT_A);
8001
8002 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
8003 /* PCH SDVOB multiplex with HDMIB */
8004 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
8005 if (!found)
8006 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
8007 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
8008 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
8009 }
8010
8011 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
8012 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
8013
8014 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
8015 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
8016
8017 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
8018 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
8019
8020 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
8021 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
8022 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
8023 bool has_edp, has_port;
8024
8025 if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
8026 intel_crt_init(dev_priv);
8027
8028 /*
8029 * The DP_DETECTED bit is the latched state of the DDC
8030 * SDA pin at boot. However since eDP doesn't require DDC
8031 * (no way to plug in a DP->HDMI dongle) the DDC pins for
8032 * eDP ports may have been muxed to an alternate function.
8033 * Thus we can't rely on the DP_DETECTED bit alone to detect
8034 * eDP ports. Consult the VBT as well as DP_DETECTED to
8035 * detect eDP ports.
8036 *
8037 * Sadly the straps seem to be missing sometimes even for HDMI
8038 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
8039 * and VBT for the presence of the port. Additionally we can't
8040 * trust the port type the VBT declares as we've seen at least
8041 * HDMI ports that the VBT claim are DP or eDP.
8042 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X, SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}

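/*
 * Highest dotclock we can hope to drive: on icl+ a mode may be split
 * across two pipes via bigjoiner, which doubles the limit. This is a
 * coarse upper bound; more precise per-mode checks happen later.
 */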
static int max_dotclock(struct drm_i915_private *i915)
{
	int max_dotclock = i915->max_dotclk_freq;

	/* icl+ might use bigjoiner */
	if (DISPLAY_VER(i915) >= 11)
		max_dotclock *= 2;

	return max_dotclock;
}

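/*
 * Top level .mode_valid() hook, run on every probed mode for every
 * connector. Only limits that apply to all outputs are checked here;
 * connector and encoder specific constraints are validated in their
 * respective hooks.
 */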
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/*
	 * Reject clearly excessive dotclocks early to
	 * avoid having to worry about huge integers later.
	 */
	if (mode->clock > max_dotclock(dev_priv))
		return MODE_CLOCK_HIGH;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

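/*
 * Filter out modes that exceed the maximum plane dimensions, so that
 * the modes we advertise can actually be used with a fullscreen plane.
 * With bigjoiner the maximum plane width doubles.
 */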
enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode,
				bool bigjoiner)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(dev_priv) >= 11) {
		plane_width_max = 5120 << bigjoiner;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

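/*
 * Per-platform modeset vtables; intel_init_display_hooks() below picks
 * the right one based on display version and platform capabilities.
 */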
static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};

static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_init_cdclk_hooks(dev_priv);
	intel_audio_hooks_init(dev_priv);

	intel_dpll_init_clock_hook(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display.funcs.display = &skl_display_funcs;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.funcs.display = &ddi_display_funcs;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.funcs.display = &pch_split_display_funcs;
	} else if (IS_CHERRYVIEW(dev_priv) ||
		   IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.funcs.display = &vlv_display_funcs;
	} else {
		dev_priv->display.funcs.display = &i9xx_display_funcs;
	}

	intel_fdi_init_hook(dev_priv);
}

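/*
 * Read the current cdclk configuration out of the hardware and seed
 * the software cdclk state with it, so the driver starts out agreeing
 * with whatever the BIOS (or a previous driver instance) programmed.
 */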
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state;

	if (!HAS_DISPLAY(i915))
		return;

	cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
}

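/*
 * Pull every CRTC and plane into the atomic state so the check phase
 * recomputes watermarks for all of them, while preserving the
 * "inherited" flag on active CRTCs to avoid a full modeset.
 */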
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_crtc(state->dev, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (crtc_state->hw.active) {
			/*
			 * Preserve the inherited flag to avoid
			 * taking the full modeset path.
			 */
			crtc_state->inherited = true;
		}
	}

	drm_for_each_plane(plane, state->dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WMs should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.funcs.wm->optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		intel_optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements. This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform. Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

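/*
 * Commit the state that was read out of the hardware straight back to
 * the hardware, forcing all active planes to recompute their state
 * (see the comment at the call site in intel_modeset_init()).
 */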
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio, infoframes, etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};

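/*
 * Set up the drm_mode_config limits for this device: minimum and
 * maximum framebuffer and cursor dimensions vary with display version
 * and platform, as detailed below.
 */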
static void intel_mode_config_init(struct drm_i915_private *i915)
{
	struct drm_mode_config *mode_config = &i915->drm.mode_config;

	drm_mode_config_init(&i915->drm);
	INIT_LIST_HEAD(&i915->global_obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;
	mode_config->helper_private = &intel_mode_config_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(i915) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(i915) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(i915) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	if (IS_I845G(i915) || IS_I865G(i915)) {
		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (IS_I830(i915) || IS_I85X(i915) ||
		   IS_I915G(i915) || IS_I915GM(i915)) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}

static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}

/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_ucode_init(i915);

	i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
						WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->display.atomic_helper.free_list);
	INIT_WORK(&i915->display.atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}

/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(i915);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->display.cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(i915);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;
		intel_crtc_initial_plane_config(crtc);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}

/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there are no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	skl_watermark_ipc_init(i915);

	return 0;
}

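/*
 * Force a pipe on with fixed 640x480@60 timings, for the "force quirk"
 * handling mentioned in the debug message below.
 */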
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_atomic_state *state = i915->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(i915))
		return;

	i915->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(i915, state, &ctx);

	skl_watermark_ipc_update(i915);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&i915->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	flush_workqueue(i915->display.wq.flip);
	flush_workqueue(i915->display.wq.modeset);

	flush_work(&i915->display.atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);
}

/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->display.wq.flip);
	destroy_workqueue(i915->display.wq.modeset);

	intel_fbc_cleanup(i915);
}

/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_dmc_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}

bool intel_modeset_probe_defer(struct pci_dev *pdev)
{
	struct drm_privacy_screen *privacy_screen;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return true;

	/* If the LCD panel has a privacy-screen, wait for it */
	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
		return true;

	drm_privacy_screen_put(privacy_screen);

	return false;
}

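/*
 * Register the userspace-visible display interfaces (debugfs, opregion
 * and ACPI video, audio, fbdev, connector polling) once output probing
 * is done and hpd irqs are enabled.
 */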
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	intel_acpi_video_register(i915);

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (otherwise we get a ghost
	 * connected connector status), e.g. VGA on gm45. Hence we can
	 * only set up the initial fbdev config after hpd irqs are
	 * fully enabled. We do it last so that the async config cannot
	 * run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}

void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), flush the
	 * hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}

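/*
 * Scanout with VT-d active needs a workaround on gen6+ (judging by the
 * name, extra guard padding around scanout buffers); callers use this
 * predicate to decide whether to apply it.
 */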
bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
}