1 /*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28 #include <linux/dma-fence.h>
29 #include <linux/ktime.h>
30
31 #include <drm/drm_atomic.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_atomic_uapi.h>
34 #include <drm/drm_blend.h>
35 #include <drm/drm_bridge.h>
36 #include <drm/drm_damage_helper.h>
37 #include <drm/drm_device.h>
38 #include <drm/drm_drv.h>
39 #include <drm/drm_framebuffer.h>
40 #include <drm/drm_gem_atomic_helper.h>
41 #include <drm/drm_print.h>
42 #include <drm/drm_self_refresh_helper.h>
43 #include <drm/drm_vblank.h>
44 #include <drm/drm_writeback.h>
45
46 #include "drm_crtc_helper_internal.h"
47 #include "drm_crtc_internal.h"
48
49 /**
50 * DOC: overview
51 *
52 * This helper library provides implementations of check and commit functions on
53 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
54 * also provides convenience implementations for the atomic state handling
55 * callbacks for drivers which don't need to subclass the drm core structures to
56 * add their own additional internal state.
57 *
58 * This library also provides default implementations for the check callback in
59 * drm_atomic_helper_check() and for the commit callback with
60 * drm_atomic_helper_commit(). But the individual stages and callbacks are
61 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
62 * together with a driver private modeset implementation.
63 *
64 * This library also provides implementations for all the legacy driver
65 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
66 * drm_atomic_helper_disable_plane(), and the various functions to implement
67 * set_property callbacks. New drivers must not implement these functions
68 * themselves but must use the provided helpers.
69 *
70 * The atomic helper uses the same function table structures as all other
71 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
72 * struct &drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
73 * also shares the &struct drm_plane_helper_funcs function table with the plane
74 * helpers.
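 *
 * As a minimal sketch (the foo_ prefixed names are hypothetical placeholders
 * for a driver's own symbols), a driver without extra global constraints can
 * wire the default helpers straight into its &struct drm_mode_config_funcs::
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 *
 * The .fb_create hook shown is just one common choice for GEM-based drivers.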
75 */
76 static void
77 drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
78 struct drm_plane_state *old_plane_state,
79 struct drm_plane_state *plane_state,
80 struct drm_plane *plane)
81 {
82 struct drm_crtc_state *crtc_state;
83
84 if (old_plane_state->crtc) {
85 crtc_state = drm_atomic_get_new_crtc_state(state,
86 old_plane_state->crtc);
87
88 if (WARN_ON(!crtc_state))
89 return;
90
91 crtc_state->planes_changed = true;
92 }
93
94 if (plane_state->crtc) {
95 crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
96
97 if (WARN_ON(!crtc_state))
98 return;
99
100 crtc_state->planes_changed = true;
101 }
102 }
103
104 static int handle_conflicting_encoders(struct drm_atomic_state *state,
105 bool disable_conflicting_encoders)
106 {
107 struct drm_connector_state *new_conn_state;
108 struct drm_connector *connector;
109 struct drm_connector_list_iter conn_iter;
110 struct drm_encoder *encoder;
111 unsigned int encoder_mask = 0;
112 int i, ret = 0;
113
114 /*
115 * First loop, find all newly assigned encoders from the connectors
116 * part of the state. If the same encoder is assigned to multiple
117 * connectors bail out.
118 */
119 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
120 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
121 struct drm_encoder *new_encoder;
122
123 if (!new_conn_state->crtc)
124 continue;
125
126 if (funcs->atomic_best_encoder)
127 new_encoder = funcs->atomic_best_encoder(connector,
128 state);
129 else if (funcs->best_encoder)
130 new_encoder = funcs->best_encoder(connector);
131 else
132 new_encoder = drm_connector_get_single_encoder(connector);
133
134 if (new_encoder) {
135 if (encoder_mask & drm_encoder_mask(new_encoder)) {
136 drm_dbg_atomic(connector->dev,
137 "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
138 new_encoder->base.id, new_encoder->name,
139 connector->base.id, connector->name);
140
141 return -EINVAL;
142 }
143
144 encoder_mask |= drm_encoder_mask(new_encoder);
145 }
146 }
147
148 if (!encoder_mask)
149 return 0;
150
151 /*
152 * Second loop, iterate over all connectors not part of the state.
153 *
154 * If a conflicting encoder is found and disable_conflicting_encoders
155 * is not set, an error is returned. Userspace can provide a solution
156 * through the atomic ioctl.
157 *
158 * If the flag is set conflicting connectors are removed from the CRTC
159 * and the CRTC is disabled if no encoder is left. This preserves
160 * compatibility with the legacy set_config behavior.
161 */
162 drm_connector_list_iter_begin(state->dev, &conn_iter);
163 drm_for_each_connector_iter(connector, &conn_iter) {
164 struct drm_crtc_state *crtc_state;
165
166 if (drm_atomic_get_new_connector_state(state, connector))
167 continue;
168
169 encoder = connector->state->best_encoder;
170 if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
171 continue;
172
173 if (!disable_conflicting_encoders) {
174 drm_dbg_atomic(connector->dev,
175 "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
176 encoder->base.id, encoder->name,
177 connector->state->crtc->base.id,
178 connector->state->crtc->name,
179 connector->base.id, connector->name);
180 ret = -EINVAL;
181 goto out;
182 }
183
184 new_conn_state = drm_atomic_get_connector_state(state, connector);
185 if (IS_ERR(new_conn_state)) {
186 ret = PTR_ERR(new_conn_state);
187 goto out;
188 }
189
190 drm_dbg_atomic(connector->dev,
191 "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
192 encoder->base.id, encoder->name,
193 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
194 connector->base.id, connector->name);
195
196 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
197
198 ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
199 if (ret)
200 goto out;
201
202 if (!crtc_state->connector_mask) {
203 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
204 NULL);
205 if (ret < 0)
206 goto out;
207
208 crtc_state->active = false;
209 }
210 }
211 out:
212 drm_connector_list_iter_end(&conn_iter);
213
214 return ret;
215 }
216
217 static void
218 set_best_encoder(struct drm_atomic_state *state,
219 struct drm_connector_state *conn_state,
220 struct drm_encoder *encoder)
221 {
222 struct drm_crtc_state *crtc_state;
223 struct drm_crtc *crtc;
224
225 if (conn_state->best_encoder) {
226 /* Unset the encoder_mask in the old crtc state. */
227 crtc = conn_state->connector->state->crtc;
228
229 /* A NULL crtc is an error here because we should have
230 * duplicated a NULL best_encoder when crtc was NULL.
231 * As an exception restoring duplicated atomic state
232 * during resume is allowed, so don't warn when
233 * best_encoder is equal to encoder we intend to set.
234 */
235 WARN_ON(!crtc && encoder != conn_state->best_encoder);
236 if (crtc) {
237 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
238
239 crtc_state->encoder_mask &=
240 ~drm_encoder_mask(conn_state->best_encoder);
241 }
242 }
243
244 if (encoder) {
245 crtc = conn_state->crtc;
246 WARN_ON(!crtc);
247 if (crtc) {
248 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
249
250 crtc_state->encoder_mask |=
251 drm_encoder_mask(encoder);
252 }
253 }
254
255 conn_state->best_encoder = encoder;
256 }
257
258 static void
259 steal_encoder(struct drm_atomic_state *state,
260 struct drm_encoder *encoder)
261 {
262 struct drm_crtc_state *crtc_state;
263 struct drm_connector *connector;
264 struct drm_connector_state *old_connector_state, *new_connector_state;
265 int i;
266
267 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
268 struct drm_crtc *encoder_crtc;
269
270 if (new_connector_state->best_encoder != encoder)
271 continue;
272
273 encoder_crtc = old_connector_state->crtc;
274
275 drm_dbg_atomic(encoder->dev,
276 "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
277 encoder->base.id, encoder->name,
278 encoder_crtc->base.id, encoder_crtc->name);
279
280 set_best_encoder(state, new_connector_state, NULL);
281
282 crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
283 crtc_state->connectors_changed = true;
284
285 return;
286 }
287 }
288
289 static int
290 update_connector_routing(struct drm_atomic_state *state,
291 struct drm_connector *connector,
292 struct drm_connector_state *old_connector_state,
293 struct drm_connector_state *new_connector_state)
294 {
295 const struct drm_connector_helper_funcs *funcs;
296 struct drm_encoder *new_encoder;
297 struct drm_crtc_state *crtc_state;
298
299 drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n",
300 connector->base.id, connector->name);
301
302 if (old_connector_state->crtc != new_connector_state->crtc) {
303 if (old_connector_state->crtc) {
304 crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
305 crtc_state->connectors_changed = true;
306 }
307
308 if (new_connector_state->crtc) {
309 crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
310 crtc_state->connectors_changed = true;
311 }
312 }
313
314 if (!new_connector_state->crtc) {
315 drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n",
316 connector->base.id, connector->name);
317
318 set_best_encoder(state, new_connector_state, NULL);
319
320 return 0;
321 }
322
323 crtc_state = drm_atomic_get_new_crtc_state(state,
324 new_connector_state->crtc);
325 /*
326 * For compatibility with legacy users, we want to make sure that
327 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
328 * which would result in anything else must be considered invalid, to
329 * avoid turning on new displays on dead connectors.
330 *
331 * Since the connector can be unregistered at any point during an
332 * atomic check or commit, this is racy. But that's OK: all we care
333 * about is ensuring that userspace can't do anything but shut off the
334 * display on a connector that was destroyed after it's been notified,
335 * not before.
336 *
337 * Additionally, we also want to ignore connector registration when
338 * we're trying to restore an atomic state during system resume since
339 * there's a chance the connector may have been destroyed during the
340 * process, but it's better to ignore that than cause
341 * drm_atomic_helper_resume() to fail.
342 */
343 if (!state->duplicated && drm_connector_is_unregistered(connector) &&
344 crtc_state->active) {
345 drm_dbg_atomic(connector->dev,
346 "[CONNECTOR:%d:%s] is not registered\n",
347 connector->base.id, connector->name);
348 return -EINVAL;
349 }
350
351 funcs = connector->helper_private;
352
353 if (funcs->atomic_best_encoder)
354 new_encoder = funcs->atomic_best_encoder(connector, state);
355 else if (funcs->best_encoder)
356 new_encoder = funcs->best_encoder(connector);
357 else
358 new_encoder = drm_connector_get_single_encoder(connector);
359
360 if (!new_encoder) {
361 drm_dbg_atomic(connector->dev,
362 "No suitable encoder found for [CONNECTOR:%d:%s]\n",
363 connector->base.id, connector->name);
364 return -EINVAL;
365 }
366
367 if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
368 drm_dbg_atomic(connector->dev,
369 "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
370 new_encoder->base.id,
371 new_encoder->name,
372 new_connector_state->crtc->base.id,
373 new_connector_state->crtc->name);
374 return -EINVAL;
375 }
376
377 if (new_encoder == new_connector_state->best_encoder) {
378 set_best_encoder(state, new_connector_state, new_encoder);
379
380 drm_dbg_atomic(connector->dev,
381 "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
382 connector->base.id,
383 connector->name,
384 new_encoder->base.id,
385 new_encoder->name,
386 new_connector_state->crtc->base.id,
387 new_connector_state->crtc->name);
388
389 return 0;
390 }
391
392 steal_encoder(state, new_encoder);
393
394 set_best_encoder(state, new_connector_state, new_encoder);
395
396 crtc_state->connectors_changed = true;
397
398 drm_dbg_atomic(connector->dev,
399 "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
400 connector->base.id,
401 connector->name,
402 new_encoder->base.id,
403 new_encoder->name,
404 new_connector_state->crtc->base.id,
405 new_connector_state->crtc->name);
406
407 return 0;
408 }
409
410 static int
411 mode_fixup(struct drm_atomic_state *state)
412 {
413 struct drm_crtc *crtc;
414 struct drm_crtc_state *new_crtc_state;
415 struct drm_connector *connector;
416 struct drm_connector_state *new_conn_state;
417 int i;
418 int ret;
419
420 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
421 if (!new_crtc_state->mode_changed &&
422 !new_crtc_state->connectors_changed)
423 continue;
424
425 drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
426 }
427
428 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
429 const struct drm_encoder_helper_funcs *funcs;
430 struct drm_encoder *encoder;
431 struct drm_bridge *bridge;
432
433 WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
434
435 if (!new_conn_state->crtc || !new_conn_state->best_encoder)
436 continue;
437
438 new_crtc_state =
439 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
440
441 /*
442 * Each encoder has at most one connector (since we always steal
443 * it away), so we won't call ->mode_fixup twice.
444 */
445 encoder = new_conn_state->best_encoder;
446 funcs = encoder->helper_private;
447
448 bridge = drm_bridge_chain_get_first_bridge(encoder);
449 ret = drm_atomic_bridge_chain_check(bridge,
450 new_crtc_state,
451 new_conn_state);
452 if (ret) {
453 drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
454 return ret;
455 }
456
457 if (funcs && funcs->atomic_check) {
458 ret = funcs->atomic_check(encoder, new_crtc_state,
459 new_conn_state);
460 if (ret) {
461 drm_dbg_atomic(encoder->dev,
462 "[ENCODER:%d:%s] check failed\n",
463 encoder->base.id, encoder->name);
464 return ret;
465 }
466 } else if (funcs && funcs->mode_fixup) {
467 ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
468 &new_crtc_state->adjusted_mode);
469 if (!ret) {
470 drm_dbg_atomic(encoder->dev,
471 "[ENCODER:%d:%s] fixup failed\n",
472 encoder->base.id, encoder->name);
473 return -EINVAL;
474 }
475 }
476 }
477
478 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
479 const struct drm_crtc_helper_funcs *funcs;
480
481 if (!new_crtc_state->enable)
482 continue;
483
484 if (!new_crtc_state->mode_changed &&
485 !new_crtc_state->connectors_changed)
486 continue;
487
488 funcs = crtc->helper_private;
489 if (!funcs || !funcs->mode_fixup)
490 continue;
491
492 ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
493 &new_crtc_state->adjusted_mode);
494 if (!ret) {
495 drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n",
496 crtc->base.id, crtc->name);
497 return -EINVAL;
498 }
499 }
500
501 return 0;
502 }
503
504 static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
505 struct drm_encoder *encoder,
506 struct drm_crtc *crtc,
507 const struct drm_display_mode *mode)
508 {
509 struct drm_bridge *bridge;
510 enum drm_mode_status ret;
511
512 ret = drm_encoder_mode_valid(encoder, mode);
513 if (ret != MODE_OK) {
514 drm_dbg_atomic(encoder->dev,
515 "[ENCODER:%d:%s] mode_valid() failed\n",
516 encoder->base.id, encoder->name);
517 return ret;
518 }
519
520 bridge = drm_bridge_chain_get_first_bridge(encoder);
521 ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
522 mode);
523 if (ret != MODE_OK) {
524 drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
525 return ret;
526 }
527
528 ret = drm_crtc_mode_valid(crtc, mode);
529 if (ret != MODE_OK) {
530 drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] mode_valid() failed\n",
531 crtc->base.id, crtc->name);
532 return ret;
533 }
534
535 return ret;
536 }
537
538 static int
539 mode_valid(struct drm_atomic_state *state)
540 {
541 struct drm_connector_state *conn_state;
542 struct drm_connector *connector;
543 int i;
544
545 for_each_new_connector_in_state(state, connector, conn_state, i) {
546 struct drm_encoder *encoder = conn_state->best_encoder;
547 struct drm_crtc *crtc = conn_state->crtc;
548 struct drm_crtc_state *crtc_state;
549 enum drm_mode_status mode_status;
550 const struct drm_display_mode *mode;
551
552 if (!crtc || !encoder)
553 continue;
554
555 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
556 if (!crtc_state)
557 continue;
558 if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
559 continue;
560
561 mode = &crtc_state->mode;
562
563 mode_status = mode_valid_path(connector, encoder, crtc, mode);
564 if (mode_status != MODE_OK)
565 return -EINVAL;
566 }
567
568 return 0;
569 }
570
571 /**
572 * drm_atomic_helper_check_modeset - validate state object for modeset changes
573 * @dev: DRM device
574 * @state: the driver state object
575 *
576 * Check the state object to see if the requested state is physically possible.
577 * This does all the CRTC and connector related computations for an atomic
578 * update and adds any additional connectors needed for full modesets. It calls
579 * the various per-object callbacks in the following order:
580 *
581 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
582 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
583 * 3. If it's determined a modeset is needed then all connectors on the affected
584 * CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
585 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
586 * &drm_crtc_helper_funcs.mode_valid are called on the affected components.
587 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
588 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
589 * This function is only called when the encoder will be part of a configured CRTC;
590 * it must not be used for implementing connector property validation.
591 * If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
592 * instead.
593 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
594 *
595 * &drm_crtc_state.mode_changed is set when the input mode is changed.
596 * &drm_crtc_state.connectors_changed is set when a connector is added or
597 * removed from the CRTC. &drm_crtc_state.active_changed is set when
598 * &drm_crtc_state.active changes, which is used for DPMS.
599 * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
600 * See also: drm_atomic_crtc_needs_modeset()
601 *
602 * IMPORTANT:
603 *
604 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
605 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
606 * without a full modeset) _must_ call this function after that change. It is
607 * permitted to call this function multiple times for the same update, e.g.
608 * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
609 * adjusted dotclock for fifo space allocation and watermark computation.
610 *
611 * RETURNS:
612 * Zero for success or -errno
613 */
614 int
615 drm_atomic_helper_check_modeset(struct drm_device *dev,
616 struct drm_atomic_state *state)
617 {
618 struct drm_crtc *crtc;
619 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
620 struct drm_connector *connector;
621 struct drm_connector_state *old_connector_state, *new_connector_state;
622 int i, ret;
623 unsigned int connectors_mask = 0;
624
625 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
626 bool has_connectors =
627 !!new_crtc_state->connector_mask;
628
629 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
630
631 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
632 drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n",
633 crtc->base.id, crtc->name);
634 new_crtc_state->mode_changed = true;
635 }
636
637 if (old_crtc_state->enable != new_crtc_state->enable) {
638 drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n",
639 crtc->base.id, crtc->name);
640
641 /*
642 * For clarity this assignment is done here, but
643 * enable == 0 is only true when there are no
644 * connectors and a NULL mode.
645 *
646 * The other way around is true as well. enable != 0
647 * implies that connectors are attached and a mode is set.
648 */
649 new_crtc_state->mode_changed = true;
650 new_crtc_state->connectors_changed = true;
651 }
652
653 if (old_crtc_state->active != new_crtc_state->active) {
654 drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n",
655 crtc->base.id, crtc->name);
656 new_crtc_state->active_changed = true;
657 }
658
659 if (new_crtc_state->enable != has_connectors) {
660 drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch\n",
661 crtc->base.id, crtc->name);
662
663 return -EINVAL;
664 }
665
666 if (drm_dev_has_vblank(dev))
667 new_crtc_state->no_vblank = false;
668 else
669 new_crtc_state->no_vblank = true;
670 }
671
672 ret = handle_conflicting_encoders(state, false);
673 if (ret)
674 return ret;
675
676 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
677 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
678
679 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
680
681 /*
682 * This only sets crtc->connectors_changed for routing changes,
683 * drivers must set crtc->connectors_changed themselves when
684 * connector properties need to be updated.
685 */
686 ret = update_connector_routing(state, connector,
687 old_connector_state,
688 new_connector_state);
689 if (ret)
690 return ret;
691 if (old_connector_state->crtc) {
692 new_crtc_state = drm_atomic_get_new_crtc_state(state,
693 old_connector_state->crtc);
694 if (old_connector_state->link_status !=
695 new_connector_state->link_status)
696 new_crtc_state->connectors_changed = true;
697
698 if (old_connector_state->max_requested_bpc !=
699 new_connector_state->max_requested_bpc)
700 new_crtc_state->connectors_changed = true;
701 }
702
703 if (funcs->atomic_check)
704 ret = funcs->atomic_check(connector, state);
705 if (ret) {
706 drm_dbg_atomic(dev,
707 "[CONNECTOR:%d:%s] driver check failed\n",
708 connector->base.id, connector->name);
709 return ret;
710 }
711
712 connectors_mask |= BIT(i);
713 }
714
715 /*
716 * After all the routing has been prepared we need to add in any
717 * connector which is itself unchanged, but whose CRTC changes its
718 * configuration. This must be done before calling mode_fixup in case a
719 * crtc only changed its mode but has the same set of connectors.
720 */
721 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
722 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
723 continue;
724
725 drm_dbg_atomic(dev,
726 "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
727 crtc->base.id, crtc->name,
728 new_crtc_state->enable ? 'y' : 'n',
729 new_crtc_state->active ? 'y' : 'n');
730
731 ret = drm_atomic_add_affected_connectors(state, crtc);
732 if (ret != 0)
733 return ret;
734
735 ret = drm_atomic_add_affected_planes(state, crtc);
736 if (ret != 0)
737 return ret;
738 }
739
740 /*
741 * Iterate over all connectors again, to make sure atomic_check()
742 * has been called on them when a modeset is forced.
743 */
744 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
745 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
746
747 if (connectors_mask & BIT(i))
748 continue;
749
750 if (funcs->atomic_check)
751 ret = funcs->atomic_check(connector, state);
752 if (ret) {
753 drm_dbg_atomic(dev,
754 "[CONNECTOR:%d:%s] driver check failed\n",
755 connector->base.id, connector->name);
756 return ret;
757 }
758 }
759
760 /*
761 * Iterate over all connectors again, and add all affected bridges to
762 * the state.
763 */
764 for_each_oldnew_connector_in_state(state, connector,
765 old_connector_state,
766 new_connector_state, i) {
767 struct drm_encoder *encoder;
768
769 encoder = old_connector_state->best_encoder;
770 ret = drm_atomic_add_encoder_bridges(state, encoder);
771 if (ret)
772 return ret;
773
774 encoder = new_connector_state->best_encoder;
775 ret = drm_atomic_add_encoder_bridges(state, encoder);
776 if (ret)
777 return ret;
778 }
779
780 ret = mode_valid(state);
781 if (ret)
782 return ret;
783
784 return mode_fixup(state);
785 }
786 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
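
/*
 * Illustrative sketch (not part of the helper library): per the IMPORTANT note
 * above, a driver whose own checks can set &drm_crtc_state.mode_changed must
 * re-run drm_atomic_helper_check_modeset() afterwards. The foo_ names below
 * are hypothetical:
 *
 *	static int foo_mode_config_atomic_check(struct drm_device *dev,
 *						struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check_modeset(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_check_display_limits(state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_atomic_helper_check_modeset(dev, state);
 *	}
 *
 * where foo_check_display_limits() stands in for driver code that may flip
 * &drm_crtc_state.mode_changed to true on some CRTCs.
 */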
787
788 /**
789 * drm_atomic_helper_check_wb_encoder_state() - Check writeback encoder state
790 * @encoder: encoder state to check
791 * @conn_state: connector state to check
792 *
793 * Checks if the writeback connector state is valid, and returns an error if it
794 * isn't.
795 *
796 * RETURNS:
797 * Zero for success or -errno
798 */
799 int
800 drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
801 struct drm_connector_state *conn_state)
802 {
803 struct drm_writeback_job *wb_job = conn_state->writeback_job;
804 struct drm_property_blob *pixel_format_blob;
805 struct drm_framebuffer *fb;
806 size_t i, nformats;
807 u32 *formats;
808
809 if (!wb_job || !wb_job->fb)
810 return 0;
811
812 pixel_format_blob = wb_job->connector->pixel_formats_blob_ptr;
813 nformats = pixel_format_blob->length / sizeof(u32);
814 formats = pixel_format_blob->data;
815 fb = wb_job->fb;
816
817 for (i = 0; i < nformats; i++)
818 if (fb->format->format == formats[i])
819 return 0;
820
821 drm_dbg_kms(encoder->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
822
823 return -EINVAL;
824 }
825 EXPORT_SYMBOL(drm_atomic_helper_check_wb_encoder_state);
826
827 /**
828 * drm_atomic_helper_check_plane_state() - Check plane state for validity
829 * @plane_state: plane state to check
830 * @crtc_state: CRTC state to check
831 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
832 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
833 * @can_position: is it legal to position the plane such that it
834 * doesn't cover the entire CRTC? This will generally
835 * only be false for primary planes.
836 * @can_update_disabled: can the plane be updated while the CRTC
837 * is disabled?
838 *
839 * Checks that a desired plane update is valid, and updates various
840 * bits of derived state (clipped coordinates etc.). Drivers that provide
841 * their own plane handling rather than helper-provided implementations may
842 * still wish to call this function to avoid duplication of error checking
843 * code.
844 *
845 * RETURNS:
846 * Zero if update appears valid, error code on failure
847 */
848 int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
849 const struct drm_crtc_state *crtc_state,
850 int min_scale,
851 int max_scale,
852 bool can_position,
853 bool can_update_disabled)
854 {
855 struct drm_framebuffer *fb = plane_state->fb;
856 struct drm_rect *src = &plane_state->src;
857 struct drm_rect *dst = &plane_state->dst;
858 unsigned int rotation = plane_state->rotation;
859 struct drm_rect clip = {};
860 int hscale, vscale;
861
862 WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
863
864 *src = drm_plane_state_src(plane_state);
865 *dst = drm_plane_state_dest(plane_state);
866
867 if (!fb) {
868 plane_state->visible = false;
869 return 0;
870 }
871
872 /* crtc should only be NULL when disabling (i.e., !fb) */
873 if (WARN_ON(!plane_state->crtc)) {
874 plane_state->visible = false;
875 return 0;
876 }
877
878 if (!crtc_state->enable && !can_update_disabled) {
879 drm_dbg_kms(plane_state->plane->dev,
880 "Cannot update plane of a disabled CRTC.\n");
881 return -EINVAL;
882 }
883
884 drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
885
886 /* Check scaling */
887 hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
888 vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
889 if (hscale < 0 || vscale < 0) {
890 drm_dbg_kms(plane_state->plane->dev,
891 "Invalid scaling of plane\n");
892 drm_rect_debug_print("src: ", &plane_state->src, true);
893 drm_rect_debug_print("dst: ", &plane_state->dst, false);
894 return -ERANGE;
895 }
896
897 if (crtc_state->enable)
898 drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
899
900 plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
901
902 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
903
904 if (!plane_state->visible)
905 /*
906 * Plane isn't visible; some drivers can handle this
907 * so we just return success here. Drivers that can't
908 * (including those that use the primary plane helper's
909 * update function) will return an error from their
910 * update_plane handler.
911 */
912 return 0;
913
914 if (!can_position && !drm_rect_equals(dst, &clip)) {
915 drm_dbg_kms(plane_state->plane->dev,
916 "Plane must cover entire CRTC\n");
917 drm_rect_debug_print("dst: ", dst, false);
918 drm_rect_debug_print("clip: ", &clip, false);
919 return -EINVAL;
920 }
921
922 return 0;
923 }
924 EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
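
/*
 * Illustrative sketch of a &drm_plane_helper_funcs.atomic_check hook using
 * drm_atomic_helper_check_plane_state(). The foo_ names are hypothetical, and
 * the limits shown (no scaling, plane must cover the CRTC, updates allowed on
 * a disabled CRTC) are just one possible choice:
 *
 *	static int foo_plane_atomic_check(struct drm_plane *plane,
 *					  struct drm_atomic_state *state)
 *	{
 *		struct drm_plane_state *new_state =
 *			drm_atomic_get_new_plane_state(state, plane);
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!new_state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
 *		if (WARN_ON(!crtc_state))
 *			return -EINVAL;
 *
 *		return drm_atomic_helper_check_plane_state(new_state, crtc_state,
 *							   DRM_PLANE_NO_SCALING,
 *							   DRM_PLANE_NO_SCALING,
 *							   false, true);
 *	}
 */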
925
926 /**
927 * drm_atomic_helper_check_crtc_state() - Check CRTC state for validity
928 * @crtc_state: CRTC state to check
929 * @can_disable_primary_planes: can the CRTC be enabled without a primary plane?
930 *
931 * Checks that a desired CRTC update is valid. Drivers that provide
932 * their own CRTC handling rather than helper-provided implementations may
933 * still wish to call this function to avoid duplication of error checking
934 * code.
935 *
936 * Note that @can_disable_primary_planes only tests if the CRTC can be
937 * enabled without a primary plane. To test if a primary plane can be updated
938 * without a CRTC, use drm_atomic_helper_check_plane_state() in the plane's
939 * atomic check.
940 *
941 * RETURNS:
942 * Zero if update appears valid, error code on failure
943 */
944 int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state,
945 bool can_disable_primary_planes)
946 {
947 struct drm_device *dev = crtc_state->crtc->dev;
948
949 if (!crtc_state->enable)
950 return 0;
951
952 /* needs at least one primary plane to be enabled */
953 if (!can_disable_primary_planes) {
954 bool has_primary_plane = false;
955 struct drm_plane *plane;
956
957 drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
958 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
959 has_primary_plane = true;
960 break;
961 }
962 }
963 if (!has_primary_plane) {
964 drm_dbg_kms(dev, "Cannot enable CRTC without a primary plane.\n");
965 return -EINVAL;
966 }
967 }
968
969 return 0;
970 }
971 EXPORT_SYMBOL(drm_atomic_helper_check_crtc_state);
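
/*
 * A possible call site (sketch only; the foo_ names are hypothetical) is a
 * CRTC's &drm_crtc_helper_funcs.atomic_check hook, here requiring a primary
 * plane whenever the CRTC is enabled:
 *
 *	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *					 struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *crtc_state =
 *			drm_atomic_get_new_crtc_state(state, crtc);
 *
 *		return drm_atomic_helper_check_crtc_state(crtc_state, false);
 *	}
 */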
972
973 /**
974 * drm_atomic_helper_check_planes - validate state object for planes changes
975 * @dev: DRM device
976 * @state: the driver state object
977 *
978 * Check the state object to see if the requested state is physically possible.
979 * This does all the plane update related checks by calling into the
980 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
981 * hooks provided by the driver.
982 *
983 * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
984 * updated planes.
985 *
986 * RETURNS:
987 * Zero for success or -errno
988 */
989 int
990 drm_atomic_helper_check_planes(struct drm_device *dev,
991 struct drm_atomic_state *state)
992 {
993 struct drm_crtc *crtc;
994 struct drm_crtc_state *new_crtc_state;
995 struct drm_plane *plane;
996 struct drm_plane_state *new_plane_state, *old_plane_state;
997 int i, ret = 0;
998
999 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
1000 const struct drm_plane_helper_funcs *funcs;
1001
1002 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
1003
1004 funcs = plane->helper_private;
1005
1006 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
1007
1008 drm_atomic_helper_check_plane_damage(state, new_plane_state);
1009
1010 if (!funcs || !funcs->atomic_check)
1011 continue;
1012
1013 ret = funcs->atomic_check(plane, state);
1014 if (ret) {
1015 drm_dbg_atomic(plane->dev,
1016 "[PLANE:%d:%s] atomic driver check failed\n",
1017 plane->base.id, plane->name);
1018 return ret;
1019 }
1020 }
1021
1022 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1023 const struct drm_crtc_helper_funcs *funcs;
1024
1025 funcs = crtc->helper_private;
1026
1027 if (!funcs || !funcs->atomic_check)
1028 continue;
1029
1030 ret = funcs->atomic_check(crtc, state);
1031 if (ret) {
1032 drm_dbg_atomic(crtc->dev,
1033 "[CRTC:%d:%s] atomic driver check failed\n",
1034 crtc->base.id, crtc->name);
1035 return ret;
1036 }
1037 }
1038
1039 return ret;
1040 }
1041 EXPORT_SYMBOL(drm_atomic_helper_check_planes);
1042
1043 /**
1044 * drm_atomic_helper_check - validate state object
1045 * @dev: DRM device
1046 * @state: the driver state object
1047 *
1048 * Check the state object to see if the requested state is physically possible.
1049 * Only CRTCs and planes have check callbacks, so for any additional (global)
1050 * checking that a driver needs it can simply wrap that around this function.
1051 * Drivers without such needs can directly use this as their
1052 * &drm_mode_config_funcs.atomic_check callback.
1053 *
1054 * This just wraps the two parts of the state checking for planes and modeset
1055 * state in the default order: First it calls drm_atomic_helper_check_modeset()
1056 * and then drm_atomic_helper_check_planes(). The assumption is that the
1057 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
1058 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
1059 * watermarks.
1060 *
1061 * Note that zpos normalization will add all enabled planes to the state, which
1062 * might not be desired for some drivers.
1063 * For example, enabling or disabling a cursor plane which has a fixed zpos value
1064 * would force all other enabled planes to be part of the state change.
1065 *
1066 * RETURNS:
1067 * Zero for success or -errno
1068 */
1069 int drm_atomic_helper_check(struct drm_device *dev,
1070 struct drm_atomic_state *state)
1071 {
1072 int ret;
1073
1074 ret = drm_atomic_helper_check_modeset(dev, state);
1075 if (ret)
1076 return ret;
1077
1078 if (dev->mode_config.normalize_zpos) {
1079 ret = drm_atomic_normalize_zpos(dev, state);
1080 if (ret)
1081 return ret;
1082 }
1083
1084 ret = drm_atomic_helper_check_planes(dev, state);
1085 if (ret)
1086 return ret;
1087
1088 if (state->legacy_cursor_update)
1089 state->async_update = !drm_atomic_helper_async_check(dev, state);
1090
1091 drm_self_refresh_helper_alter_state(state);
1092
1093 return ret;
1094 }
1095 EXPORT_SYMBOL(drm_atomic_helper_check);
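
/*
 * As the kernel-doc above suggests, a driver with additional global checks can
 * wrap this helper in its &drm_mode_config_funcs.atomic_check implementation.
 * Sketch only; foo_check_global_limits() is a hypothetical driver function:
 *
 *	static int foo_global_atomic_check(struct drm_device *dev,
 *					   struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return foo_check_global_limits(dev, state);
 *	}
 */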
1096
1097 static bool
1098 crtc_needs_disable(struct drm_crtc_state *old_state,
1099 struct drm_crtc_state *new_state)
1100 {
1101 /*
1102 * No new_state means the CRTC is off, so the only criteria is whether
1103 * it's currently active or in self refresh mode.
1104 */
1105 if (!new_state)
1106 return drm_atomic_crtc_effectively_active(old_state);
1107
1108 /*
1109 * We need to disable bridge(s) and CRTC if we're transitioning out of
1110 * self-refresh and changing CRTCs at the same time, because the
1111 * bridge tracks self-refresh status via CRTC state.
1112 */
1113 if (old_state->self_refresh_active &&
1114 old_state->crtc != new_state->crtc)
1115 return true;
1116
1117 /*
1118 * We also need to run through the crtc_funcs->disable() function if
1119 * the CRTC is currently on, if it's transitioning to self refresh
1120 * mode, or if it's in self refresh mode and needs to be fully
1121 * disabled.
1122 */
1123 return old_state->active ||
1124 (old_state->self_refresh_active && !new_state->active) ||
1125 new_state->self_refresh_active;
1126 }
1127
1128 static void
1129 disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
1130 {
1131 struct drm_connector *connector;
1132 struct drm_connector_state *old_conn_state, *new_conn_state;
1133 struct drm_crtc *crtc;
1134 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1135 int i;
1136
1137 for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1138 const struct drm_encoder_helper_funcs *funcs;
1139 struct drm_encoder *encoder;
1140 struct drm_bridge *bridge;
1141
1142 /*
1143 * Shut down everything that's in the changeset and currently
1144 * still on. So need to check the old, saved state.
1145 */
1146 if (!old_conn_state->crtc)
1147 continue;
1148
1149 old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
1150
1151 if (new_conn_state->crtc)
1152 new_crtc_state = drm_atomic_get_new_crtc_state(
1153 old_state,
1154 new_conn_state->crtc);
1155 else
1156 new_crtc_state = NULL;
1157
1158 if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
1159 !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1160 continue;
1161
1162 encoder = old_conn_state->best_encoder;
1163
1164 /* We shouldn't get this far if we didn't previously have
1165 * an encoder.. but WARN_ON() rather than explode.
1166 */
1167 if (WARN_ON(!encoder))
1168 continue;
1169
1170 funcs = encoder->helper_private;
1171
1172 drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n",
1173 encoder->base.id, encoder->name);
1174
1175 /*
1176 * Each encoder has at most one connector (since we always steal
1177 * it away), so we won't call disable hooks twice.
1178 */
1179 bridge = drm_bridge_chain_get_first_bridge(encoder);
1180 drm_atomic_bridge_chain_disable(bridge, old_state);
1181
1182 /* Right function depends upon target state. */
1183 if (funcs) {
1184 if (funcs->atomic_disable)
1185 funcs->atomic_disable(encoder, old_state);
1186 else if (new_conn_state->crtc && funcs->prepare)
1187 funcs->prepare(encoder);
1188 else if (funcs->disable)
1189 funcs->disable(encoder);
1190 else if (funcs->dpms)
1191 funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
1192 }
1193
1194 drm_atomic_bridge_chain_post_disable(bridge, old_state);
1195 }
1196
1197 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1198 const struct drm_crtc_helper_funcs *funcs;
1199 int ret;
1200
1201 /* Shut down everything that needs a full modeset. */
1202 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1203 continue;
1204
1205 if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
1206 continue;
1207
1208 funcs = crtc->helper_private;
1209
1210 drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
1211 crtc->base.id, crtc->name);
1212
1213
1214 /* Right function depends upon target state. */
1215 if (new_crtc_state->enable && funcs->prepare)
1216 funcs->prepare(crtc);
1217 else if (funcs->atomic_disable)
1218 funcs->atomic_disable(crtc, old_state);
1219 else if (funcs->disable)
1220 funcs->disable(crtc);
1221 else if (funcs->dpms)
1222 funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1223
1224 if (!drm_dev_has_vblank(dev))
1225 continue;
1226
1227 ret = drm_crtc_vblank_get(crtc);
1228 WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
1229 if (ret == 0)
1230 drm_crtc_vblank_put(crtc);
1231 }
1232 }
1233
1234 /**
1235 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1236 * @dev: DRM device
1237 * @old_state: atomic state object with old state structures
1238 *
1239 * This function updates all the various legacy modeset state pointers in
1240 * connectors, encoders and CRTCs.
1241 *
1242 * Drivers can use this for building their own atomic commit if they don't have
1243 * a pure helper-based modeset implementation.
1244 *
1245 * Since these updates are not synchronized with lockings, only code paths
1246 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
1247 * legacy state filled out by this helper. De facto this means this helper and
1248 * the legacy state pointers are only really useful for transitioning an
1249 * existing driver to the atomic world.
1250 */
1251 void
1252 drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
1253 struct drm_atomic_state *old_state)
1254 {
1255 struct drm_connector *connector;
1256 struct drm_connector_state *old_conn_state, *new_conn_state;
1257 struct drm_crtc *crtc;
1258 struct drm_crtc_state *new_crtc_state;
1259 int i;
1260
1261 /* clear out existing links and update dpms */
1262 for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1263 if (connector->encoder) {
1264 WARN_ON(!connector->encoder->crtc);
1265
1266 connector->encoder->crtc = NULL;
1267 connector->encoder = NULL;
1268 }
1269
1270 crtc = new_conn_state->crtc;
1271 if ((!crtc && old_conn_state->crtc) ||
1272 (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1273 int mode = DRM_MODE_DPMS_OFF;
1274
1275 if (crtc && crtc->state->active)
1276 mode = DRM_MODE_DPMS_ON;
1277
1278 connector->dpms = mode;
1279 }
1280 }
1281
1282 /* set new links */
1283 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1284 if (!new_conn_state->crtc)
1285 continue;
1286
1287 if (WARN_ON(!new_conn_state->best_encoder))
1288 continue;
1289
1290 connector->encoder = new_conn_state->best_encoder;
1291 connector->encoder->crtc = new_conn_state->crtc;
1292 }
1293
1294 /* set legacy state in the crtc structure */
1295 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1296 struct drm_plane *primary = crtc->primary;
1297 struct drm_plane_state *new_plane_state;
1298
1299 crtc->mode = new_crtc_state->mode;
1300 crtc->enabled = new_crtc_state->enable;
1301
1302 new_plane_state =
1303 drm_atomic_get_new_plane_state(old_state, primary);
1304
1305 if (new_plane_state && new_plane_state->crtc == crtc) {
1306 crtc->x = new_plane_state->src_x >> 16;
1307 crtc->y = new_plane_state->src_y >> 16;
1308 }
1309 }
1310 }
1311 EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1312
1313 /**
1314 * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
1315 * @state: atomic state object
1316 *
1317 * Updates the timestamping constants used for precise vblank timestamps
1318 * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
1319 */
1320 void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
1321 {
1322 struct drm_crtc_state *new_crtc_state;
1323 struct drm_crtc *crtc;
1324 int i;
1325
1326 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1327 if (new_crtc_state->enable)
1328 drm_calc_timestamping_constants(crtc,
1329 &new_crtc_state->adjusted_mode);
1330 }
1331 }
1332 EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
1333
1334 static void
1335 crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
1336 {
1337 struct drm_crtc *crtc;
1338 struct drm_crtc_state *new_crtc_state;
1339 struct drm_connector *connector;
1340 struct drm_connector_state *new_conn_state;
1341 int i;
1342
1343 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1344 const struct drm_crtc_helper_funcs *funcs;
1345
1346 if (!new_crtc_state->mode_changed)
1347 continue;
1348
1349 funcs = crtc->helper_private;
1350
1351 if (new_crtc_state->enable && funcs->mode_set_nofb) {
1352 drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n",
1353 crtc->base.id, crtc->name);
1354
1355 funcs->mode_set_nofb(crtc);
1356 }
1357 }
1358
1359 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1360 const struct drm_encoder_helper_funcs *funcs;
1361 struct drm_encoder *encoder;
1362 struct drm_display_mode *mode, *adjusted_mode;
1363 struct drm_bridge *bridge;
1364
1365 if (!new_conn_state->best_encoder)
1366 continue;
1367
1368 encoder = new_conn_state->best_encoder;
1369 funcs = encoder->helper_private;
1370 new_crtc_state = new_conn_state->crtc->state;
1371 mode = &new_crtc_state->mode;
1372 adjusted_mode = &new_crtc_state->adjusted_mode;
1373
1374 if (!new_crtc_state->mode_changed)
1375 continue;
1376
1377 drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
1378 encoder->base.id, encoder->name);
1379
1380 /*
1381 * Each encoder has at most one connector (since we always steal
1382 * it away), so we won't call mode_set hooks twice.
1383 */
1384 if (funcs && funcs->atomic_mode_set) {
1385 funcs->atomic_mode_set(encoder, new_crtc_state,
1386 new_conn_state);
1387 } else if (funcs && funcs->mode_set) {
1388 funcs->mode_set(encoder, mode, adjusted_mode);
1389 }
1390
1391 bridge = drm_bridge_chain_get_first_bridge(encoder);
1392 drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
1393 }
1394 }
1395
1396 /**
1397 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1398 * @dev: DRM device
1399 * @old_state: atomic state object with old state structures
1400 *
1401 * This function shuts down all the outputs that need to be shut down and
1402 * prepares them (if required) with the new mode.
1403 *
1404 * For compatibility with legacy CRTC helpers this should be called before
1405 * drm_atomic_helper_commit_planes(), which is what the default commit function
1406 * does. But drivers with different needs can group the modeset commits together
1407 * and do the plane commits at the end. This is useful for drivers doing runtime
1408 * PM since plane updates then only happen when the CRTC is actually enabled.
1409 */
1410 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
1411 struct drm_atomic_state *old_state)
1412 {
1413 disable_outputs(dev, old_state);
1414
1415 drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
1416 drm_atomic_helper_calc_timestamping_constants(old_state);
1417
1418 crtc_set_mode(dev, old_state);
1419 }
1420 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1421
1422 static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1423 struct drm_atomic_state *old_state)
1424 {
1425 struct drm_connector *connector;
1426 struct drm_connector_state *new_conn_state;
1427 int i;
1428
1429 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1430 const struct drm_connector_helper_funcs *funcs;
1431
1432 funcs = connector->helper_private;
1433 if (!funcs->atomic_commit)
1434 continue;
1435
1436 if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1437 WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1438 funcs->atomic_commit(connector, old_state);
1439 }
1440 }
1441 }
1442
1443 /**
1444 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1445 * @dev: DRM device
1446 * @old_state: atomic state object with old state structures
1447 *
1448 * This function enables all the outputs with the new configuration which had to
1449 * be turned off for the update.
1450 *
1451 * For compatibility with legacy CRTC helpers this should be called after
1452 * drm_atomic_helper_commit_planes(), which is what the default commit function
1453 * does. But drivers with different needs can group the modeset commits together
1454 * and do the plane commits at the end. This is useful for drivers doing runtime
1455 * PM since plane updates then only happen when the CRTC is actually enabled.
1456 */
1457 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
1458 struct drm_atomic_state *old_state)
1459 {
1460 struct drm_crtc *crtc;
1461 struct drm_crtc_state *old_crtc_state;
1462 struct drm_crtc_state *new_crtc_state;
1463 struct drm_connector *connector;
1464 struct drm_connector_state *new_conn_state;
1465 int i;
1466
1467 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1468 const struct drm_crtc_helper_funcs *funcs;
1469
1470 /* Need to filter out CRTCs where only planes change. */
1471 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1472 continue;
1473
1474 if (!new_crtc_state->active)
1475 continue;
1476
1477 funcs = crtc->helper_private;
1478
1479 if (new_crtc_state->enable) {
1480 drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
1481 crtc->base.id, crtc->name);
1482 if (funcs->atomic_enable)
1483 funcs->atomic_enable(crtc, old_state);
1484 else if (funcs->commit)
1485 funcs->commit(crtc);
1486 }
1487 }
1488
1489 for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1490 const struct drm_encoder_helper_funcs *funcs;
1491 struct drm_encoder *encoder;
1492 struct drm_bridge *bridge;
1493
1494 if (!new_conn_state->best_encoder)
1495 continue;
1496
1497 if (!new_conn_state->crtc->state->active ||
1498 !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1499 continue;
1500
1501 encoder = new_conn_state->best_encoder;
1502 funcs = encoder->helper_private;
1503
1504 drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n",
1505 encoder->base.id, encoder->name);
1506
1507 /*
1508 * Each encoder has at most one connector (since we always steal
1509 * it away), so we won't call enable hooks twice.
1510 */
1511 bridge = drm_bridge_chain_get_first_bridge(encoder);
1512 drm_atomic_bridge_chain_pre_enable(bridge, old_state);
1513
1514 if (funcs) {
1515 if (funcs->atomic_enable)
1516 funcs->atomic_enable(encoder, old_state);
1517 else if (funcs->enable)
1518 funcs->enable(encoder);
1519 else if (funcs->commit)
1520 funcs->commit(encoder);
1521 }
1522
1523 drm_atomic_bridge_chain_enable(bridge, old_state);
1524 }
1525
1526 drm_atomic_helper_commit_writebacks(dev, old_state);
1527 }
1528 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
1529
1530 /**
1531 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1532 * @dev: DRM device
1533 * @state: atomic state object with old state structures
1534 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1535 * Otherwise @state is the old state.
1536 *
1537 * For implicit sync, drivers should fish the exclusive fence out from the
1538 * incoming fb's and stash it in the drm_plane_state. This is called after
1539 * drm_atomic_helper_swap_state() so it uses the current plane state (and
1540 * just uses the atomic state to find the changed planes)
1541 *
1542 * Note that @pre_swap is needed since the point where we block for fences moves
1543 * around depending upon whether an atomic commit is blocking or
1544 * non-blocking. For non-blocking commit all waiting needs to happen after
1545 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1546 * to wait **before** we do anything that can't be easily rolled back. That is
1547 * before we call drm_atomic_helper_swap_state().
1548 *
1549 * Returns zero if success or < 0 if dma_fence_wait() fails.
1550 */
1551 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1552 struct drm_atomic_state *state,
1553 bool pre_swap)
1554 {
1555 struct drm_plane *plane;
1556 struct drm_plane_state *new_plane_state;
1557 int i, ret;
1558
1559 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1560 if (!new_plane_state->fence)
1561 continue;
1562
1563 WARN_ON(!new_plane_state->fb);
1564
1565 /*
1566 * If waiting for fences pre-swap (ie: nonblock), userspace can
1567 * still interrupt the operation. Instead of blocking until the
1568 * timer expires, make the wait interruptible.
1569 */
1570 ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1571 if (ret)
1572 return ret;
1573
1574 dma_fence_put(new_plane_state->fence);
1575 new_plane_state->fence = NULL;
1576 }
1577
1578 return 0;
1579 }
1580 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
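
/*
 * The fence is typically stashed in &drm_plane_state.fence by the plane's
 * &drm_plane_helper_funcs.prepare_fb hook. For GEM-backed framebuffers a driver
 * can usually rely on the shared helper; a sketch with hypothetical foo_ hooks:
 *
 *	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
 *		.prepare_fb = drm_gem_plane_helper_prepare_fb,
 *		.atomic_check = foo_plane_atomic_check,
 *		.atomic_update = foo_plane_atomic_update,
 *	};
 */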
1581
1582 /**
1583 * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
1584 * @dev: DRM device
1585 * @old_state: atomic state object with old state structures
1586 *
1587 * Helper to, after atomic commit, wait for vblanks on all affected
1588 * CRTCs (ie. before cleaning up old framebuffers using
1589 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1590 * framebuffers have actually changed to optimize for the legacy cursor and
1591 * plane update use-case.
1592 *
1593 * Drivers using the nonblocking commit tracking support initialized by calling
1594 * drm_atomic_helper_setup_commit() should look at
1595 * drm_atomic_helper_wait_for_flip_done() as an alternative.
1596 */
1597 void
1598 drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1599 struct drm_atomic_state *old_state)
1600 {
1601 struct drm_crtc *crtc;
1602 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1603 int i, ret;
1604 unsigned int crtc_mask = 0;
1605
1606 /*
1607 * Legacy cursor ioctls are completely unsynced, and userspace
1608 * relies on that (by doing tons of cursor updates).
1609 */
1610 if (old_state->legacy_cursor_update)
1611 return;
1612
1613 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1614 if (!new_crtc_state->active)
1615 continue;
1616
1617 ret = drm_crtc_vblank_get(crtc);
1618 if (ret != 0)
1619 continue;
1620
1621 crtc_mask |= drm_crtc_mask(crtc);
1622 old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1623 }
1624
1625 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1626 if (!(crtc_mask & drm_crtc_mask(crtc)))
1627 continue;
1628
1629 ret = wait_event_timeout(dev->vblank[i].queue,
1630 old_state->crtcs[i].last_vblank_count !=
1631 drm_crtc_vblank_count(crtc),
1632 msecs_to_jiffies(100));
1633
1634 WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
1635 crtc->base.id, crtc->name);
1636
1637 drm_crtc_vblank_put(crtc);
1638 }
1639 }
1640 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1641
1642 /**
1643 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1644 * @dev: DRM device
1645 * @old_state: atomic state object with old state structures
1646 *
1647 * Helper to, after atomic commit, wait for page flips on all affected
1648 * CRTCs (ie. before cleaning up old framebuffers using
1649 * drm_atomic_helper_cleanup_planes()). Compared to
1650 * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
1651 * CRTCs, assuming that cursor-only updates are signalling their completion
1652 * immediately (or using a different path).
1653 *
1654 * This requires that drivers use the nonblocking commit tracking support
1655 * initialized using drm_atomic_helper_setup_commit().
1656 */
1657 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1658 struct drm_atomic_state *old_state)
1659 {
1660 struct drm_crtc *crtc;
1661 int i;
1662
1663 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1664 struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
1665 int ret;
1666
1667 crtc = old_state->crtcs[i].ptr;
1668
1669 if (!crtc || !commit)
1670 continue;
1671
1672 ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1673 if (ret == 0)
1674 drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n",
1675 crtc->base.id, crtc->name);
1676 }
1677
1678 if (old_state->fake_commit)
1679 complete_all(&old_state->fake_commit->flip_done);
1680 }
1681 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1682
1683 /**
1684 * drm_atomic_helper_commit_tail - commit atomic update to hardware
1685 * @old_state: atomic state object with old state structures
1686 *
1687 * This is the default implementation for the
1688 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1689 * that do not support runtime_pm or do not need the CRTC to be
1690 * enabled to perform a commit. Otherwise, see
1691 * drm_atomic_helper_commit_tail_rpm().
1692 *
1693 * Note that the default ordering of how the various stages are called is
1694 * chosen to match the legacy modeset helper library as closely as possible.
1695 */
1696 void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
1697 {
1698 struct drm_device *dev = old_state->dev;
1699
1700 drm_atomic_helper_commit_modeset_disables(dev, old_state);
1701
1702 drm_atomic_helper_commit_planes(dev, old_state, 0);
1703
1704 drm_atomic_helper_commit_modeset_enables(dev, old_state);
1705
1706 drm_atomic_helper_fake_vblank(old_state);
1707
1708 drm_atomic_helper_commit_hw_done(old_state);
1709
1710 drm_atomic_helper_wait_for_vblanks(dev, old_state);
1711
1712 drm_atomic_helper_cleanup_planes(dev, old_state);
1713 }
1714 EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1715
1716 /**
1717 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
1718 * @old_state: atomic state object with old state structures
1719 *
1720 * This is an alternative implementation for the
1721 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1722 * that support runtime_pm or need the CRTC to be enabled to perform a
1723 * commit. Otherwise, one should use the default implementation
1724 * drm_atomic_helper_commit_tail().
1725 */
1726 void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
1727 {
1728 struct drm_device *dev = old_state->dev;
1729
1730 drm_atomic_helper_commit_modeset_disables(dev, old_state);
1731
1732 drm_atomic_helper_commit_modeset_enables(dev, old_state);
1733
1734 drm_atomic_helper_commit_planes(dev, old_state,
1735 DRM_PLANE_COMMIT_ACTIVE_ONLY);
1736
1737 drm_atomic_helper_fake_vblank(old_state);
1738
1739 drm_atomic_helper_commit_hw_done(old_state);
1740
1741 drm_atomic_helper_wait_for_vblanks(dev, old_state);
1742
1743 drm_atomic_helper_cleanup_planes(dev, old_state);
1744 }
1745 EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
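
/*
 * Illustrative sketch (not part of the original source): drivers select one of
 * the commit_tail implementations, or their own, through
 * &drm_mode_config_helper_funcs, which is assigned to
 * dev->mode_config.helper_private during driver setup. The foo_ name is a
 * made-up placeholder.
 */
static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};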
1746
1747 static void commit_tail(struct drm_atomic_state *old_state)
1748 {
1749 struct drm_device *dev = old_state->dev;
1750 const struct drm_mode_config_helper_funcs *funcs;
1751 struct drm_crtc_state *new_crtc_state;
1752 struct drm_crtc *crtc;
1753 ktime_t start;
1754 s64 commit_time_ms;
1755 unsigned int i, new_self_refresh_mask = 0;
1756
1757 funcs = dev->mode_config.helper_private;
1758
1759 /*
1760 * We're measuring the _entire_ commit, so the time will vary depending
1761 * on how many fences and objects are involved. For the purposes of self
1762 * refresh, this is desirable since it'll give us an idea of how
1763 * congested things are. This will inform our decision on how often we
1764 * should enter self refresh after idle.
1765 *
1766 * These times will be averaged out in the self refresh helpers to avoid
1767 * overreacting to one outlier frame.
1768 */
1769 start = ktime_get();
1770
1771 drm_atomic_helper_wait_for_fences(dev, old_state, false);
1772
1773 drm_atomic_helper_wait_for_dependencies(old_state);
1774
1775 /*
1776 * We cannot safely access new_crtc_state after
1777 * drm_atomic_helper_commit_hw_done(), so figure out which CRTCs have
1778 * self-refresh active beforehand:
1779 */
1780 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
1781 if (new_crtc_state->self_refresh_active)
1782 new_self_refresh_mask |= BIT(i);
1783
1784 if (funcs && funcs->atomic_commit_tail)
1785 funcs->atomic_commit_tail(old_state);
1786 else
1787 drm_atomic_helper_commit_tail(old_state);
1788
1789 commit_time_ms = ktime_ms_delta(ktime_get(), start);
1790 if (commit_time_ms > 0)
1791 drm_self_refresh_helper_update_avg_times(old_state,
1792 (unsigned long)commit_time_ms,
1793 new_self_refresh_mask);
1794
1795 drm_atomic_helper_commit_cleanup_done(old_state);
1796
1797 drm_atomic_state_put(old_state);
1798 }
1799
1800 static void commit_work(struct work_struct *work)
1801 {
1802 struct drm_atomic_state *state = container_of(work,
1803 struct drm_atomic_state,
1804 commit_work);
1805 commit_tail(state);
1806 }
1807
1808 /**
1809 * drm_atomic_helper_async_check - check if state can be committed asynchronously
1810 * @dev: DRM device
1811 * @state: the driver state object
1812 *
1813 * This helper will check if it is possible to commit the state asynchronously.
1814 * Async commits are not supposed to swap the states like normal sync commits
1815 * but just do in-place changes on the current state.
1816 *
1817 * It will return 0 if the commit can happen in an asynchronous fashion, or an
1818 * error code if not. Note that an error just means the state can't be committed
1819 * asynchronously; if that happens, the commit should be treated like a normal synchronous commit.
1820 */
1821 int drm_atomic_helper_async_check(struct drm_device *dev,
1822 struct drm_atomic_state *state)
1823 {
1824 struct drm_crtc *crtc;
1825 struct drm_crtc_state *crtc_state;
1826 struct drm_plane *plane = NULL;
1827 struct drm_plane_state *old_plane_state = NULL;
1828 struct drm_plane_state *new_plane_state = NULL;
1829 const struct drm_plane_helper_funcs *funcs;
1830 int i, ret, n_planes = 0;
1831
1832 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1833 if (drm_atomic_crtc_needs_modeset(crtc_state))
1834 return -EINVAL;
1835 }
1836
1837 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
1838 n_planes++;
1839
1840 /* FIXME: we support only single plane updates for now */
1841 if (n_planes != 1) {
1842 drm_dbg_atomic(dev,
1843 "only single plane async updates are supported\n");
1844 return -EINVAL;
1845 }
1846
1847 if (!new_plane_state->crtc ||
1848 old_plane_state->crtc != new_plane_state->crtc) {
1849 drm_dbg_atomic(dev,
1850 "[PLANE:%d:%s] async update cannot change CRTC\n",
1851 plane->base.id, plane->name);
1852 return -EINVAL;
1853 }
1854
1855 funcs = plane->helper_private;
1856 if (!funcs->atomic_async_update) {
1857 drm_dbg_atomic(dev,
1858 "[PLANE:%d:%s] driver does not support async updates\n",
1859 plane->base.id, plane->name);
1860 return -EINVAL;
1861 }
1862
1863 if (new_plane_state->fence) {
1864 drm_dbg_atomic(dev,
1865 "[PLANE:%d:%s] missing fence for async update\n",
1866 plane->base.id, plane->name);
1867 return -EINVAL;
1868 }
1869
1870 /*
1871 * Don't do an async update if there is an outstanding commit modifying
1872 * the plane. This prevents our async update's changes from getting
1873 * overridden by a previous synchronous update's state.
1874 */
1875 if (old_plane_state->commit &&
1876 !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
1877 drm_dbg_atomic(dev,
1878 "[PLANE:%d:%s] inflight previous commit preventing async commit\n",
1879 plane->base.id, plane->name);
1880 return -EBUSY;
1881 }
1882
1883 ret = funcs->atomic_async_check(plane, state);
1884 if (ret != 0)
1885 drm_dbg_atomic(dev,
1886 "[PLANE:%d:%s] driver async check failed\n",
1887 plane->base.id, plane->name);
1888 return ret;
1889 }
1890 EXPORT_SYMBOL(drm_atomic_helper_async_check);
1891
1892 /**
1893 * drm_atomic_helper_async_commit - commit state asynchronously
1894 * @dev: DRM device
1895 * @state: the driver state object
1896 *
1897 * This function commits a state asynchronously, i.e., not vblank
1898 * synchronized. It should be used on a state only when
1899 * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
1900 * the states like normal sync commits, but just do in-place changes on the
1901 * current state.
1902 *
1903 * TODO: Implement full swap instead of doing in-place changes.
1904 */
1905 void drm_atomic_helper_async_commit(struct drm_device *dev,
1906 struct drm_atomic_state *state)
1907 {
1908 struct drm_plane *plane;
1909 struct drm_plane_state *plane_state;
1910 const struct drm_plane_helper_funcs *funcs;
1911 int i;
1912
1913 for_each_new_plane_in_state(state, plane, plane_state, i) {
1914 struct drm_framebuffer *new_fb = plane_state->fb;
1915 struct drm_framebuffer *old_fb = plane->state->fb;
1916
1917 funcs = plane->helper_private;
1918 funcs->atomic_async_update(plane, state);
1919
1920 /*
1921 * ->atomic_async_update() is supposed to update the
1922 * plane->state in place; make sure that at least the common
1923 * properties have been properly updated.
1924 */
1925 WARN_ON_ONCE(plane->state->fb != new_fb);
1926 WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
1927 WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
1928 WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
1929 WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
1930
1931 /*
1932 * Make sure the FBs have been swapped so that cleanups in the
1933 * new_state actually clean up the old FB.
1934 */
1935 WARN_ON_ONCE(plane_state->fb != old_fb);
1936 }
1937 }
1938 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
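
/*
 * Illustrative sketch (not part of the original source): an async update
 * implementation that satisfies the checks above by updating plane->state in
 * place and swapping the framebuffer pointers. The foo_ name is a made-up
 * placeholder; a real driver also programs the hardware here.
 */
static void foo_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/* Swap the FBs so that cleanup of new_state releases the old FB. */
	swap(plane->state->fb, new_state->fb);
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;

	/* Program the hardware with the new scanout parameters here. */
}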
1939
1940 /**
1941 * drm_atomic_helper_commit - commit validated state object
1942 * @dev: DRM device
1943 * @state: the driver state object
1944 * @nonblock: whether nonblocking behavior is requested.
1945 *
1946 * This function commits a state object that has been pre-validated with
1947 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
1948 * reservation fails. This function implements nonblocking commits, using
1949 * drm_atomic_helper_setup_commit() and related functions.
1950 *
1951 * Committing the actual hardware state is done through the
1952 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
1953 * implementation drm_atomic_helper_commit_tail().
1954 *
1955 * RETURNS:
1956 * Zero for success or -errno.
1957 */
1958 int drm_atomic_helper_commit(struct drm_device *dev,
1959 struct drm_atomic_state *state,
1960 bool nonblock)
1961 {
1962 int ret;
1963
1964 if (state->async_update) {
1965 ret = drm_atomic_helper_prepare_planes(dev, state);
1966 if (ret)
1967 return ret;
1968
1969 drm_atomic_helper_async_commit(dev, state);
1970 drm_atomic_helper_cleanup_planes(dev, state);
1971
1972 return 0;
1973 }
1974
1975 ret = drm_atomic_helper_setup_commit(state, nonblock);
1976 if (ret)
1977 return ret;
1978
1979 INIT_WORK(&state->commit_work, commit_work);
1980
1981 ret = drm_atomic_helper_prepare_planes(dev, state);
1982 if (ret)
1983 return ret;
1984
1985 if (!nonblock) {
1986 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
1987 if (ret)
1988 goto err;
1989 }
1990
1991 /*
1992 * This is the point of no return - everything below never fails except
1993 * when the hw goes bonghits, which means we can commit the new state on
1994 * the software side now.
1995 */
1996
1997 ret = drm_atomic_helper_swap_state(state, true);
1998 if (ret)
1999 goto err;
2000
2001 /*
2002 * Everything below can be run asynchronously without the need to grab
2003 * any modeset locks at all under one condition: It must be guaranteed
2004 * that the asynchronous work has either been cancelled (if the driver
2005 * supports it, which at least requires that the framebuffers get
2006 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
2007 * before the new state gets committed on the software side with
2008 * drm_atomic_helper_swap_state().
2009 *
2010 * This scheme allows new atomic state updates to be prepared and
2011 * checked in parallel to the asynchronous completion of the previous
2012 * update. Which is important since compositors need to figure out the
2013 * composition of the next frame right after having submitted the
2014 * current layout.
2015 *
2016 * NOTE: Commit work has multiple phases, first hardware commit, then
2017 * cleanup. We want them to overlap, hence need system_unbound_wq to
2018 * make sure work items don't artificially stall on each other.
2019 */
2020
2021 drm_atomic_state_get(state);
2022 if (nonblock)
2023 queue_work(system_unbound_wq, &state->commit_work);
2024 else
2025 commit_tail(state);
2026
2027 return 0;
2028
2029 err:
2030 drm_atomic_helper_cleanup_planes(dev, state);
2031 return ret;
2032 }
2033 EXPORT_SYMBOL(drm_atomic_helper_commit);
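
/*
 * Illustrative sketch (not part of the original source): most atomic drivers
 * simply plug the helpers above into &drm_mode_config_funcs. The foo_ name is
 * a made-up placeholder; .fb_create and other members are omitted for brevity.
 */
static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};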
2034
2035 /**
2036 * DOC: implementing nonblocking commit
2037 *
2038 * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
2039 * different operations against each other. Locks, especially struct
2040 * &drm_modeset_lock, should not be held in worker threads or any other
2041 * asynchronous context used to commit the hardware state.
2042 *
2043 * drm_atomic_helper_commit() implements the recommended sequence for
2044 * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
2045 *
2046 * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
2047 * need to propagate out of memory/VRAM errors to userspace, it must be called
2048 * synchronously.
2049 *
2050 * 2. Synchronize with any outstanding nonblocking commit worker threads which
2051 * might be affected by the new state update. This is handled by
2052 * drm_atomic_helper_setup_commit().
2053 *
2054 * Asynchronous workers need to have sufficient parallelism to be able to run
2055 * different atomic commits on different CRTCs in parallel. The simplest way to
2056 * achieve this is by running them on the &system_unbound_wq work queue. Note
2057 * that drivers are not required to split up atomic commits and run an
2058 * individual commit in parallel - userspace is supposed to do that if it cares.
2059 * But it might be beneficial to do that for modesets, since those necessarily
2060 * must be done as one global operation, and enabling or disabling a CRTC can
2061 * take a long time. But even that is not required.
2062 *
2063 * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
2064 * against all CRTCs therein. Therefore for atomic state updates which only flip
2065 * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
2066 * in its atomic check code: This would prevent committing of atomic updates to
2067 * multiple CRTCs in parallel. In general, adding additional state structures
2068 * should be avoided as much as possible, because this reduces parallelism in
2069 * (nonblocking) commits, both due to locking and due to commit sequencing
2070 * requirements.
2071 *
2072 * 3. The software state is updated synchronously with
2073 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
2074 * locks means concurrent callers never see inconsistent state. Note that commit
2075 * workers do not hold any locks; their access is only coordinated through
2076 * ordering. If workers were to access state only through the pointers in the
2077 * free-standing state objects (currently not the case for any driver) then even
2078 * multiple pending commits could be in-flight at the same time.
2079 *
2080 * 4. Schedule a work item to do all subsequent steps, using the split-out
2081 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
2082 * then cleaning up the framebuffers after the old framebuffer is no longer
2083 * being displayed. The scheduled work should synchronize against other workers
2084 * using the &drm_crtc_commit infrastructure as needed. See
2085 * drm_atomic_helper_setup_commit() for more details.
2086 */
2087
2088 static int stall_checks(struct drm_crtc *crtc, bool nonblock)
2089 {
2090 struct drm_crtc_commit *commit, *stall_commit = NULL;
2091 bool completed = true;
2092 int i;
2093 long ret = 0;
2094
2095 spin_lock(&crtc->commit_lock);
2096 i = 0;
2097 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
2098 if (i == 0) {
2099 completed = try_wait_for_completion(&commit->flip_done);
2100 /*
2101 * Userspace is not allowed to get ahead of the previous
2102 * commit with nonblocking ones.
2103 */
2104 if (!completed && nonblock) {
2105 spin_unlock(&crtc->commit_lock);
2106 drm_dbg_atomic(crtc->dev,
2107 "[CRTC:%d:%s] busy with a previous commit\n",
2108 crtc->base.id, crtc->name);
2109
2110 return -EBUSY;
2111 }
2112 } else if (i == 1) {
2113 stall_commit = drm_crtc_commit_get(commit);
2114 break;
2115 }
2116
2117 i++;
2118 }
2119 spin_unlock(&crtc->commit_lock);
2120
2121 if (!stall_commit)
2122 return 0;
2123
2124 /* We don't want to let commits get ahead of cleanup work too much,
2125 * stalling on the 2nd-previous commit means triple-buffering won't ever stall.
2126 */
2127 ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
2128 10*HZ);
2129 if (ret == 0)
2130 drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n",
2131 crtc->base.id, crtc->name);
2132
2133 drm_crtc_commit_put(stall_commit);
2134
2135 return ret < 0 ? ret : 0;
2136 }
2137
2138 static void release_crtc_commit(struct completion *completion)
2139 {
2140 struct drm_crtc_commit *commit = container_of(completion,
2141 typeof(*commit),
2142 flip_done);
2143
2144 drm_crtc_commit_put(commit);
2145 }
2146
2147 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
2148 {
2149 init_completion(&commit->flip_done);
2150 init_completion(&commit->hw_done);
2151 init_completion(&commit->cleanup_done);
2152 INIT_LIST_HEAD(&commit->commit_entry);
2153 kref_init(&commit->ref);
2154 commit->crtc = crtc;
2155 }
2156
2157 static struct drm_crtc_commit *
2158 crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
2159 {
2160 if (crtc) {
2161 struct drm_crtc_state *new_crtc_state;
2162
2163 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
2164
2165 return new_crtc_state->commit;
2166 }
2167
2168 if (!state->fake_commit) {
2169 state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
2170 if (!state->fake_commit)
2171 return NULL;
2172
2173 init_commit(state->fake_commit, NULL);
2174 }
2175
2176 return state->fake_commit;
2177 }
2178
2179 /**
2180 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
2181 * @state: new modeset state to be committed
2182 * @nonblock: whether nonblocking behavior is requested.
2183 *
2184 * This function prepares @state to be used by the atomic helper's support for
2185 * nonblocking commits. Drivers using the nonblocking commit infrastructure
2186 * should always call this function from their
2187 * &drm_mode_config_funcs.atomic_commit hook.
2188 *
2189 * Drivers that need to extend the commit setup to private objects can use the
2190 * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
2191 *
2192 * To be able to use this support drivers need to use a few more helper
2193 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
2194 * actually committing the hardware state, and for nonblocking commits this call
2195 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
2196 * and its stall parameter, for when a driver's commit hooks look at the
2197 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
2198 *
2199 * Completion of the hardware commit step must be signalled using
2200 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
2201 * to read or change any permanent software or hardware modeset state. The only
2202 * exception is state protected by other means than &drm_modeset_lock locks.
2203 * Only the free standing @state with pointers to the old state structures can
2204 * be inspected, e.g. to clean up old buffers using
2205 * drm_atomic_helper_cleanup_planes().
2206 *
2207 * At the very end, before cleaning up @state drivers must call
2208 * drm_atomic_helper_commit_cleanup_done().
2209 *
2210 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
2211 * complete and easy-to-use default implementation of the atomic_commit() hook.
2212 *
2213 * The tracking of asynchronously executed and still pending commits is done
2214 * using the core structure &drm_crtc_commit.
2215 *
2216 * By default there's no need to clean up resources allocated by this function
2217 * explicitly: drm_atomic_state_default_clear() will take care of that
2218 * automatically.
2219 *
2220 * Returns:
2221 *
2222 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
2223 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
2224 */
2225 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
2226 bool nonblock)
2227 {
2228 struct drm_crtc *crtc;
2229 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2230 struct drm_connector *conn;
2231 struct drm_connector_state *old_conn_state, *new_conn_state;
2232 struct drm_plane *plane;
2233 struct drm_plane_state *old_plane_state, *new_plane_state;
2234 struct drm_crtc_commit *commit;
2235 const struct drm_mode_config_helper_funcs *funcs;
2236 int i, ret;
2237
2238 funcs = state->dev->mode_config.helper_private;
2239
2240 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2241 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
2242 if (!commit)
2243 return -ENOMEM;
2244
2245 init_commit(commit, crtc);
2246
2247 new_crtc_state->commit = commit;
2248
2249 ret = stall_checks(crtc, nonblock);
2250 if (ret)
2251 return ret;
2252
2253 /*
2254 * Drivers only send out events when at least either current or
2255 * new CRTC state is active. Complete right away if everything
2256 * stays off.
2257 */
2258 if (!old_crtc_state->active && !new_crtc_state->active) {
2259 complete_all(&commit->flip_done);
2260 continue;
2261 }
2262
2263 /* Legacy cursor updates are fully unsynced. */
2264 if (state->legacy_cursor_update) {
2265 complete_all(&commit->flip_done);
2266 continue;
2267 }
2268
2269 if (!new_crtc_state->event) {
2270 commit->event = kzalloc(sizeof(*commit->event),
2271 GFP_KERNEL);
2272 if (!commit->event)
2273 return -ENOMEM;
2274
2275 new_crtc_state->event = commit->event;
2276 }
2277
2278 new_crtc_state->event->base.completion = &commit->flip_done;
2279 new_crtc_state->event->base.completion_release = release_crtc_commit;
2280 drm_crtc_commit_get(commit);
2281
2282 commit->abort_completion = true;
2283
2284 state->crtcs[i].commit = commit;
2285 drm_crtc_commit_get(commit);
2286 }
2287
2288 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
2289 /*
2290 * Userspace is not allowed to get ahead of the previous
2291 * commit with nonblocking ones.
2292 */
2293 if (nonblock && old_conn_state->commit &&
2294 !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
2295 drm_dbg_atomic(conn->dev,
2296 "[CONNECTOR:%d:%s] busy with a previous commit\n",
2297 conn->base.id, conn->name);
2298
2299 return -EBUSY;
2300 }
2301
2302 /* Always track connectors explicitly for e.g. link retraining. */
2303 commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
2304 if (!commit)
2305 return -ENOMEM;
2306
2307 new_conn_state->commit = drm_crtc_commit_get(commit);
2308 }
2309
2310 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2311 /*
2312 * Userspace is not allowed to get ahead of the previous
2313 * commit with nonblocking ones.
2314 */
2315 if (nonblock && old_plane_state->commit &&
2316 !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
2317 drm_dbg_atomic(plane->dev,
2318 "[PLANE:%d:%s] busy with a previous commit\n",
2319 plane->base.id, plane->name);
2320
2321 return -EBUSY;
2322 }
2323
2324 /* Always track planes explicitly for async pageflip support. */
2325 commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
2326 if (!commit)
2327 return -ENOMEM;
2328
2329 new_plane_state->commit = drm_crtc_commit_get(commit);
2330 }
2331
2332 if (funcs && funcs->atomic_commit_setup)
2333 return funcs->atomic_commit_setup(state);
2334
2335 return 0;
2336 }
2337 EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
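
/*
 * Illustrative sketch (not part of the original source) of the sequence
 * described above, for a driver rolling its own nonblocking commit worker
 * instead of using drm_atomic_helper_commit(). The foo_ names are made-up
 * placeholders; fence waiting and error unwinding are simplified.
 */
static void foo_commit_tail_work(struct drm_atomic_state *state)
{
	drm_atomic_helper_wait_for_dependencies(state);
	drm_atomic_helper_commit_tail(state); /* signals hw_done internally */
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);
}

static int foo_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state, bool nonblock)
{
	int ret;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret) {
		drm_atomic_helper_cleanup_planes(dev, state);
		return ret;
	}

	drm_atomic_state_get(state);
	if (!nonblock)
		foo_commit_tail_work(state);
	/* else: queue foo_commit_tail_work() on the driver's unbound workqueue */

	return 0;
}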
2338
2339 /**
2340 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
2341 * @old_state: atomic state object with old state structures
2342 *
2343 * This function waits for all preceding commits that touch the same CRTC as
2344 * @old_state to both be committed to the hardware (as signalled by
2345 * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
2346 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2347 *
2348 * This is part of the atomic helper support for nonblocking commits, see
2349 * drm_atomic_helper_setup_commit() for an overview.
2350 */
2351 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
2352 {
2353 struct drm_crtc *crtc;
2354 struct drm_crtc_state *old_crtc_state;
2355 struct drm_plane *plane;
2356 struct drm_plane_state *old_plane_state;
2357 struct drm_connector *conn;
2358 struct drm_connector_state *old_conn_state;
2359 int i;
2360 long ret;
2361
2362 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2363 ret = drm_crtc_commit_wait(old_crtc_state->commit);
2364 if (ret)
2365 drm_err(crtc->dev,
2366 "[CRTC:%d:%s] commit wait timed out\n",
2367 crtc->base.id, crtc->name);
2368 }
2369
2370 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
2371 ret = drm_crtc_commit_wait(old_conn_state->commit);
2372 if (ret)
2373 drm_err(conn->dev,
2374 "[CONNECTOR:%d:%s] commit wait timed out\n",
2375 conn->base.id, conn->name);
2376 }
2377
2378 for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
2379 ret = drm_crtc_commit_wait(old_plane_state->commit);
2380 if (ret)
2381 drm_err(plane->dev,
2382 "[PLANE:%d:%s] commit wait timed out\n",
2383 plane->base.id, plane->name);
2384 }
2385 }
2386 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2387
2388 /**
2389 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2390 * @old_state: atomic state object with old state structures
2391 *
2392 * This function walks all CRTCs and fakes VBLANK events on those with
2393 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2394 * The primary use of this function is writeback connectors working in oneshot
2395 * mode and faking VBLANK events. In this case they only fake the VBLANK event
2396 * when a job is queued, and any change to the pipeline that does not touch the
2397 * connector leads to timeouts when calling
2398 * drm_atomic_helper_wait_for_vblanks() or
2399 * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
2400 * connectors, this function can also fake VBLANK events for CRTCs without
2401 * VBLANK interrupt.
2402 *
2403 * This is part of the atomic helper support for nonblocking commits, see
2404 * drm_atomic_helper_setup_commit() for an overview.
2405 */
2406 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
2407 {
2408 struct drm_crtc_state *new_crtc_state;
2409 struct drm_crtc *crtc;
2410 int i;
2411
2412 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
2413 unsigned long flags;
2414
2415 if (!new_crtc_state->no_vblank)
2416 continue;
2417
2418 spin_lock_irqsave(&old_state->dev->event_lock, flags);
2419 if (new_crtc_state->event) {
2420 drm_crtc_send_vblank_event(crtc,
2421 new_crtc_state->event);
2422 new_crtc_state->event = NULL;
2423 }
2424 spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
2425 }
2426 }
2427 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2428
2429 /**
2430 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
2431 * @old_state: atomic state object with old state structures
2432 *
2433 * This function is used to signal completion of the hardware commit step. After
2434 * this step the driver is not allowed to read or change any permanent software
2435 * or hardware modeset state. The only exception is state protected by other
2436 * means than &drm_modeset_lock locks.
2437 *
2438 * Drivers should try to postpone any expensive or delayed cleanup work until
2439 * after this function is called.
2440 *
2441 * This is part of the atomic helper support for nonblocking commits, see
2442 * drm_atomic_helper_setup_commit() for an overview.
2443 */
2444 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
2445 {
2446 struct drm_crtc *crtc;
2447 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2448 struct drm_crtc_commit *commit;
2449 int i;
2450
2451 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2452 commit = new_crtc_state->commit;
2453 if (!commit)
2454 continue;
2455
2456 /*
2457 * copy new_crtc_state->commit to old_crtc_state->commit: it's
2458 * unsafe to touch new_crtc_state after hw_done, but we still
2459 * need to access the commit in cleanup_done().
2460 */
2461 if (old_crtc_state->commit)
2462 drm_crtc_commit_put(old_crtc_state->commit);
2463
2464 old_crtc_state->commit = drm_crtc_commit_get(commit);
2465
2466 /* backend must have consumed any event by now */
2467 WARN_ON(new_crtc_state->event);
2468 complete_all(&commit->hw_done);
2469 }
2470
2471 if (old_state->fake_commit) {
2472 complete_all(&old_state->fake_commit->hw_done);
2473 complete_all(&old_state->fake_commit->flip_done);
2474 }
2475 }
2476 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2477
2478 /**
2479 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2480 * @old_state: atomic state object with old state structures
2481 *
2482 * This signals completion of the atomic update @old_state, including any
2483 * cleanup work. If used, it must be called right before calling
2484 * drm_atomic_state_put().
2485 *
2486 * This is part of the atomic helper support for nonblocking commits, see
2487 * drm_atomic_helper_setup_commit() for an overview.
2488 */
2489 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
2490 {
2491 struct drm_crtc *crtc;
2492 struct drm_crtc_state *old_crtc_state;
2493 struct drm_crtc_commit *commit;
2494 int i;
2495
2496 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2497 commit = old_crtc_state->commit;
2498 if (WARN_ON(!commit))
2499 continue;
2500
2501 complete_all(&commit->cleanup_done);
2502 WARN_ON(!try_wait_for_completion(&commit->hw_done));
2503
2504 spin_lock(&crtc->commit_lock);
2505 list_del(&commit->commit_entry);
2506 spin_unlock(&crtc->commit_lock);
2507 }
2508
2509 if (old_state->fake_commit) {
2510 complete_all(&old_state->fake_commit->cleanup_done);
2511 WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
2512 }
2513 }
2514 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2515
2516 /**
2517 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2518 * @dev: DRM device
2519 * @state: atomic state object with new state structures
2520 *
2521 * This function prepares plane state, specifically framebuffers, for the new
2522 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2523 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2524 * any already successfully prepared framebuffer.
2525 *
2526 * Returns:
2527 * 0 on success, negative error code on failure.
2528 */
2529 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2530 struct drm_atomic_state *state)
2531 {
2532 struct drm_connector *connector;
2533 struct drm_connector_state *new_conn_state;
2534 struct drm_plane *plane;
2535 struct drm_plane_state *new_plane_state;
2536 int ret, i, j;
2537
2538 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
2539 if (!new_conn_state->writeback_job)
2540 continue;
2541
2542 ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
2543 if (ret < 0)
2544 return ret;
2545 }
2546
2547 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2548 const struct drm_plane_helper_funcs *funcs;
2549
2550 funcs = plane->helper_private;
2551
2552 if (funcs->prepare_fb) {
2553 ret = funcs->prepare_fb(plane, new_plane_state);
2554 if (ret)
2555 goto fail;
2556 } else {
2557 WARN_ON_ONCE(funcs->cleanup_fb);
2558
2559 if (!drm_core_check_feature(dev, DRIVER_GEM))
2560 continue;
2561
2562 ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state);
2563 if (ret)
2564 goto fail;
2565 }
2566 }
2567
2568 return 0;
2569
2570 fail:
2571 for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2572 const struct drm_plane_helper_funcs *funcs;
2573
2574 if (j >= i)
2575 continue;
2576
2577 funcs = plane->helper_private;
2578
2579 if (funcs->cleanup_fb)
2580 funcs->cleanup_fb(plane, new_plane_state);
2581 }
2582
2583 return ret;
2584 }
2585 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2586
2587 static bool plane_crtc_active(const struct drm_plane_state *state)
2588 {
2589 return state->crtc && state->crtc->state->active;
2590 }
2591
2592 /**
2593 * drm_atomic_helper_commit_planes - commit plane state
2594 * @dev: DRM device
2595 * @old_state: atomic state object with old state structures
2596 * @flags: flags for committing plane state
2597 *
2598 * This function commits the new plane state using the plane and atomic helper
2599 * functions for planes and CRTCs. It assumes that the atomic state has already
2600 * been pushed into the relevant object state pointers, since this step can no
2601 * longer fail.
2602 *
2603 * It still requires the global state object @old_state to know which planes and
2604 * CRTCs need to be updated though.
2605 *
2606 * Note that this function does all plane updates across all CRTCs in one step.
2607 * If the hardware can't support this approach look at
2608 * drm_atomic_helper_commit_planes_on_crtc() instead.
2609 *
2610 * Plane parameters can be updated by applications while the associated CRTC is
2611 * disabled. The DRM/KMS core will store the parameters in the plane state,
2612 * which will be available to the driver when the CRTC is turned on. As a result
2613 * most drivers don't need to be immediately notified of plane updates for a
2614 * disabled CRTC.
2615 *
2616 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2617 * @flags in order not to receive plane update notifications related to a
2618 * disabled CRTC. This avoids the need to manually ignore plane updates in
2619 * driver code when the driver and/or hardware can't or just don't need to deal
2620 * with updates on disabled CRTCs, for example when supporting runtime PM.
2621 *
2622 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
2623 * display controllers require a CRTC's planes to be disabled when the CRTC is
2624 * disabled. This function will skip the &drm_plane_helper_funcs.atomic_disable
2625 * call for a plane if the CRTC of the old plane state needs a modesetting
2626 * operation. Of course, the drivers need to disable the planes in their CRTC
2627 * disable callbacks since no one else would do that.
2628 *
2629 * The drm_atomic_helper_commit() default implementation doesn't set the
2630 * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
2631 * This should not be copied blindly by drivers.
2632 */
2633 void drm_atomic_helper_commit_planes(struct drm_device *dev,
2634 struct drm_atomic_state *old_state,
2635 uint32_t flags)
2636 {
2637 struct drm_crtc *crtc;
2638 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2639 struct drm_plane *plane;
2640 struct drm_plane_state *old_plane_state, *new_plane_state;
2641 int i;
2642 bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2643 bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2644
2645 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2646 const struct drm_crtc_helper_funcs *funcs;
2647
2648 funcs = crtc->helper_private;
2649
2650 if (!funcs || !funcs->atomic_begin)
2651 continue;
2652
2653 if (active_only && !new_crtc_state->active)
2654 continue;
2655
2656 funcs->atomic_begin(crtc, old_state);
2657 }
2658
2659 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2660 const struct drm_plane_helper_funcs *funcs;
2661 bool disabling;
2662
2663 funcs = plane->helper_private;
2664
2665 if (!funcs)
2666 continue;
2667
2668 disabling = drm_atomic_plane_disabling(old_plane_state,
2669 new_plane_state);
2670
2671 if (active_only) {
2672 /*
2673 * Skip planes related to inactive CRTCs. If the plane
2674 * is enabled use the state of the current CRTC. If the
2675 * plane is being disabled use the state of the old
2676 * CRTC to avoid skipping planes being disabled on an
2677 * active CRTC.
2678 */
2679 if (!disabling && !plane_crtc_active(new_plane_state))
2680 continue;
2681 if (disabling && !plane_crtc_active(old_plane_state))
2682 continue;
2683 }
2684
2685 /*
2686 * Special-case disabling the plane if drivers support it.
2687 */
2688 if (disabling && funcs->atomic_disable) {
2689 struct drm_crtc_state *crtc_state;
2690
2691 crtc_state = old_plane_state->crtc->state;
2692
2693 if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2694 no_disable)
2695 continue;
2696
2697 funcs->atomic_disable(plane, old_state);
2698 } else if (new_plane_state->crtc || disabling) {
2699 funcs->atomic_update(plane, old_state);
2700 }
2701 }
2702
2703 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2704 const struct drm_crtc_helper_funcs *funcs;
2705
2706 funcs = crtc->helper_private;
2707
2708 if (!funcs || !funcs->atomic_flush)
2709 continue;
2710
2711 if (active_only && !new_crtc_state->active)
2712 continue;
2713
2714 funcs->atomic_flush(crtc, old_state);
2715 }
2716 }
2717 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
2718
2719 /**
2720 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
2721 * @old_crtc_state: atomic state object with the old CRTC state
2722 *
2723 * This function commits the new plane state using the plane and atomic helper
2724 * functions for planes on the specific CRTC. It assumes that the atomic state
2725 * has already been pushed into the relevant object state pointers, since this
2726 * step can no longer fail.
2727 *
2728 * This function is useful when plane updates should be done CRTC-by-CRTC
2729 * instead of one global step like drm_atomic_helper_commit_planes() does.
2730 *
2731 * This function can only be safely used when planes are not allowed to move
2732 * between different CRTCs because this function doesn't handle inter-CRTC
2733 * dependencies. Callers need to ensure that no such dependencies exist, resolve
2734 * them through ordering of commit calls, or handle them through some other means.
2735 */
2736 void
2737 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
2738 {
2739 const struct drm_crtc_helper_funcs *crtc_funcs;
2740 struct drm_crtc *crtc = old_crtc_state->crtc;
2741 struct drm_atomic_state *old_state = old_crtc_state->state;
2742 struct drm_crtc_state *new_crtc_state =
2743 drm_atomic_get_new_crtc_state(old_state, crtc);
2744 struct drm_plane *plane;
2745 unsigned int plane_mask;
2746
2747 plane_mask = old_crtc_state->plane_mask;
2748 plane_mask |= new_crtc_state->plane_mask;
2749
2750 crtc_funcs = crtc->helper_private;
2751 if (crtc_funcs && crtc_funcs->atomic_begin)
2752 crtc_funcs->atomic_begin(crtc, old_state);
2753
2754 drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
2755 struct drm_plane_state *old_plane_state =
2756 drm_atomic_get_old_plane_state(old_state, plane);
2757 struct drm_plane_state *new_plane_state =
2758 drm_atomic_get_new_plane_state(old_state, plane);
2759 const struct drm_plane_helper_funcs *plane_funcs;
2760
2761 plane_funcs = plane->helper_private;
2762
2763 if (!old_plane_state || !plane_funcs)
2764 continue;
2765
2766 WARN_ON(new_plane_state->crtc &&
2767 new_plane_state->crtc != crtc);
2768
2769 if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
2770 plane_funcs->atomic_disable)
2771 plane_funcs->atomic_disable(plane, old_state);
2772 else if (new_plane_state->crtc ||
2773 drm_atomic_plane_disabling(old_plane_state, new_plane_state))
2774 plane_funcs->atomic_update(plane, old_state);
2775 }
2776
2777 if (crtc_funcs && crtc_funcs->atomic_flush)
2778 crtc_funcs->atomic_flush(crtc, old_state);
2779 }
2780 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
2781
2782 /**
2783 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
2784 * @old_crtc_state: atomic state object with the old CRTC state
2785 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
2786 *
2787 * Disables all planes associated with the given CRTC. This can be
2788 * used for instance in the CRTC helper atomic_disable callback to disable
2789 * all planes.
2790 *
2791 * If the atomic-parameter is set the function calls the CRTC's
2792 * atomic_begin hook before and atomic_flush hook after disabling the
2793 * planes.
2794 *
2795 * It is a bug to call this function without having implemented the
2796 * &drm_plane_helper_funcs.atomic_disable plane hook.
2797 */
2798 void
2799 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
2800 bool atomic)
2801 {
2802 struct drm_crtc *crtc = old_crtc_state->crtc;
2803 const struct drm_crtc_helper_funcs *crtc_funcs =
2804 crtc->helper_private;
2805 struct drm_plane *plane;
2806
2807 if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
2808 crtc_funcs->atomic_begin(crtc, NULL);
2809
2810 drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
2811 const struct drm_plane_helper_funcs *plane_funcs =
2812 plane->helper_private;
2813
2814 if (!plane_funcs)
2815 continue;
2816
2817 WARN_ON(!plane_funcs->atomic_disable);
2818 if (plane_funcs->atomic_disable)
2819 plane_funcs->atomic_disable(plane, NULL);
2820 }
2821
2822 if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
2823 crtc_funcs->atomic_flush(crtc, NULL);
2824 }
2825 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
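
/*
 * Illustrative sketch (not part of the original source): disabling all planes
 * from a CRTC's atomic_disable hook, as suggested above. The foo_ name is a
 * made-up placeholder; passing false skips the extra atomic_begin/flush round
 * trip.
 */
static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);

	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);

	/* Power down the CRTC hardware afterwards. */
}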
2826
2827 /**
2828 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
2829 * @dev: DRM device
2830 * @old_state: atomic state object with old state structures
2831 *
2832 * This function cleans up plane state, specifically framebuffers, from the old
2833 * configuration. Hence the old configuration must be preserved in @old_state to
2834 * be able to call this function.
2835 *
2836 * This function must also be called on the new state when the atomic update
2837 * fails at any point after calling drm_atomic_helper_prepare_planes().
2838 */
2839 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
2840 struct drm_atomic_state *old_state)
2841 {
2842 struct drm_plane *plane;
2843 struct drm_plane_state *old_plane_state, *new_plane_state;
2844 int i;
2845
2846 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2847 const struct drm_plane_helper_funcs *funcs;
2848 struct drm_plane_state *plane_state;
2849
2850 /*
2851 * This might be called before swapping when commit is aborted,
2852 * in which case we have to cleanup the new state.
2853 */
2854 if (old_plane_state == plane->state)
2855 plane_state = new_plane_state;
2856 else
2857 plane_state = old_plane_state;
2858
2859 funcs = plane->helper_private;
2860
2861 if (funcs->cleanup_fb)
2862 funcs->cleanup_fb(plane, plane_state);
2863 }
2864 }
2865 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
2866
2867 /**
2868 * drm_atomic_helper_swap_state - store atomic state into current sw state
2869 * @state: atomic state
2870 * @stall: stall for preceding commits
2871 *
2872 * This function stores the atomic state into the current state pointers in all
2873 * driver objects. It should be called after all potentially failing steps have
2874 * been done and have succeeded, but before the actual hardware state is committed.
2875 *
2876 * For cleanup and error recovery the current state for all changed objects will
2877 * be swapped into @state.
2878 *
2879 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
2880 *
2881 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
2882 *
2883 * 2. Do any other steps that might fail.
2884 *
2885 * 3. Put the staged state into the current state pointers with this function.
2886 *
2887 * 4. Actually commit the hardware state.
2888 *
2889 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
2890 * contains the old state. Also do any other cleanup required with that state.
2891 *
2892 * @stall must be set when nonblocking commits for this driver directly access
2893 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
2894 * the current atomic helpers this is almost always the case, since the helpers
2895 * don't pass the right state structures to the callbacks.
2896 *
2897 * Returns:
2898 *
2899 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
2900 * waiting for the previous commits has been interrupted.
2901 */
2902 int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
2903 bool stall)
2904 {
2905 int i, ret;
2906 struct drm_connector *connector;
2907 struct drm_connector_state *old_conn_state, *new_conn_state;
2908 struct drm_crtc *crtc;
2909 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2910 struct drm_plane *plane;
2911 struct drm_plane_state *old_plane_state, *new_plane_state;
2912 struct drm_crtc_commit *commit;
2913 struct drm_private_obj *obj;
2914 struct drm_private_state *old_obj_state, *new_obj_state;
2915
2916 if (stall) {
2917 /*
2918 * We have to stall for hw_done here before
2919 * drm_atomic_helper_wait_for_dependencies() because flip
2920 * depth > 1 is not yet supported by all drivers. As long as
2921 * obj->state is directly dereferenced anywhere in the drivers
2922 * atomic_commit_tail function, then it's unsafe to swap state
2923 * before drm_atomic_helper_commit_hw_done() is called.
2924 */
2925
2926 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2927 commit = old_crtc_state->commit;
2928
2929 if (!commit)
2930 continue;
2931
2932 ret = wait_for_completion_interruptible(&commit->hw_done);
2933 if (ret)
2934 return ret;
2935 }
2936
2937 for_each_old_connector_in_state(state, connector, old_conn_state, i) {
2938 commit = old_conn_state->commit;
2939
2940 if (!commit)
2941 continue;
2942
2943 ret = wait_for_completion_interruptible(&commit->hw_done);
2944 if (ret)
2945 return ret;
2946 }
2947
2948 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2949 commit = old_plane_state->commit;
2950
2951 if (!commit)
2952 continue;
2953
2954 ret = wait_for_completion_interruptible(&commit->hw_done);
2955 if (ret)
2956 return ret;
2957 }
2958 }
2959
2960 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
2961 WARN_ON(connector->state != old_conn_state);
2962
2963 old_conn_state->state = state;
2964 new_conn_state->state = NULL;
2965
2966 state->connectors[i].state = old_conn_state;
2967 connector->state = new_conn_state;
2968 }
2969
2970 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2971 WARN_ON(crtc->state != old_crtc_state);
2972
2973 old_crtc_state->state = state;
2974 new_crtc_state->state = NULL;
2975
2976 state->crtcs[i].state = old_crtc_state;
2977 crtc->state = new_crtc_state;
2978
2979 if (new_crtc_state->commit) {
2980 spin_lock(&crtc->commit_lock);
2981 list_add(&new_crtc_state->commit->commit_entry,
2982 &crtc->commit_list);
2983 spin_unlock(&crtc->commit_lock);
2984
2985 new_crtc_state->commit->event = NULL;
2986 }
2987 }
2988
2989 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2990 WARN_ON(plane->state != old_plane_state);
2991
2992 old_plane_state->state = state;
2993 new_plane_state->state = NULL;
2994
2995 state->planes[i].state = old_plane_state;
2996 plane->state = new_plane_state;
2997 }
2998
2999 for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
3000 WARN_ON(obj->state != old_obj_state);
3001
3002 old_obj_state->state = state;
3003 new_obj_state->state = NULL;
3004
3005 state->private_objs[i].state = old_obj_state;
3006 obj->state = new_obj_state;
3007 }
3008
3009 return 0;
3010 }
3011 EXPORT_SYMBOL(drm_atomic_helper_swap_state);
3012
3013 /**
3014 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
3015 * @plane: plane object to update
3016 * @crtc: owning CRTC of the plane
3017 * @fb: framebuffer to flip onto plane
3018 * @crtc_x: x offset of primary plane on @crtc
3019 * @crtc_y: y offset of primary plane on @crtc
3020 * @crtc_w: width of primary plane rectangle on @crtc
3021 * @crtc_h: height of primary plane rectangle on @crtc
3022 * @src_x: x offset of @fb for panning
3023 * @src_y: y offset of @fb for panning
3024 * @src_w: width of source rectangle in @fb
3025 * @src_h: height of source rectangle in @fb
3026 * @ctx: lock acquire context
3027 *
3028 * Provides a default plane update handler using the atomic driver interface.
3029 *
3030 * RETURNS:
3031 * Zero on success, error code on failure
3032 */
3033 int drm_atomic_helper_update_plane(struct drm_plane *plane,
3034 struct drm_crtc *crtc,
3035 struct drm_framebuffer *fb,
3036 int crtc_x, int crtc_y,
3037 unsigned int crtc_w, unsigned int crtc_h,
3038 uint32_t src_x, uint32_t src_y,
3039 uint32_t src_w, uint32_t src_h,
3040 struct drm_modeset_acquire_ctx *ctx)
3041 {
3042 struct drm_atomic_state *state;
3043 struct drm_plane_state *plane_state;
3044 int ret = 0;
3045
3046 state = drm_atomic_state_alloc(plane->dev);
3047 if (!state)
3048 return -ENOMEM;
3049
3050 state->acquire_ctx = ctx;
3051 plane_state = drm_atomic_get_plane_state(state, plane);
3052 if (IS_ERR(plane_state)) {
3053 ret = PTR_ERR(plane_state);
3054 goto fail;
3055 }
3056
3057 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3058 if (ret != 0)
3059 goto fail;
3060 drm_atomic_set_fb_for_plane(plane_state, fb);
3061 plane_state->crtc_x = crtc_x;
3062 plane_state->crtc_y = crtc_y;
3063 plane_state->crtc_w = crtc_w;
3064 plane_state->crtc_h = crtc_h;
3065 plane_state->src_x = src_x;
3066 plane_state->src_y = src_y;
3067 plane_state->src_w = src_w;
3068 plane_state->src_h = src_h;
3069
3070 if (plane == crtc->cursor)
3071 state->legacy_cursor_update = true;
3072
3073 ret = drm_atomic_commit(state);
3074 fail:
3075 drm_atomic_state_put(state);
3076 return ret;
3077 }
3078 EXPORT_SYMBOL(drm_atomic_helper_update_plane);
3079
3080 /**
3081 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
3082 * @plane: plane to disable
3083 * @ctx: lock acquire context
3084 *
3085 * Provides a default plane disable handler using the atomic driver interface.
3086 *
3087 * RETURNS:
3088 * Zero on success, error code on failure
3089 */
3090 int drm_atomic_helper_disable_plane(struct drm_plane *plane,
3091 struct drm_modeset_acquire_ctx *ctx)
3092 {
3093 struct drm_atomic_state *state;
3094 struct drm_plane_state *plane_state;
3095 int ret = 0;
3096
3097 state = drm_atomic_state_alloc(plane->dev);
3098 if (!state)
3099 return -ENOMEM;
3100
3101 state->acquire_ctx = ctx;
3102 plane_state = drm_atomic_get_plane_state(state, plane);
3103 if (IS_ERR(plane_state)) {
3104 ret = PTR_ERR(plane_state);
3105 goto fail;
3106 }
3107
3108 if (plane_state->crtc && plane_state->crtc->cursor == plane)
3109 plane_state->state->legacy_cursor_update = true;
3110
3111 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
3112 if (ret != 0)
3113 goto fail;
3114
3115 ret = drm_atomic_commit(state);
3116 fail:
3117 drm_atomic_state_put(state);
3118 return ret;
3119 }
3120 EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
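
/*
 * Illustrative sketch (not part of the original source): the legacy plane
 * entry points above are normally wired up verbatim in &drm_plane_funcs,
 * together with the default atomic state handling helpers. The foo_ name is a
 * made-up placeholder.
 */
static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};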
3121
3122 /**
3123 * drm_atomic_helper_set_config - set a new config from userspace
3124 * @set: mode set configuration
3125 * @ctx: lock acquisition context
3126 *
3127 * Provides a default CRTC set_config handler using the atomic driver interface.
3128 *
3129 * NOTE: For backwards compatibility with old userspace this automatically
3130 * resets the "link-status" property to GOOD, to force any link
3131 * re-training. The SETCRTC ioctl does not define whether an update needs
3132 * a full modeset or just a plane update, hence we're allowed to do
3133 * that. See also drm_connector_set_link_status_property().
3134 *
3135 * Returns:
3136 * Returns 0 on success, negative errno numbers on failure.
3137 */
3138 int drm_atomic_helper_set_config(struct drm_mode_set *set,
3139 struct drm_modeset_acquire_ctx *ctx)
3140 {
3141 struct drm_atomic_state *state;
3142 struct drm_crtc *crtc = set->crtc;
3143 int ret = 0;
3144
3145 state = drm_atomic_state_alloc(crtc->dev);
3146 if (!state)
3147 return -ENOMEM;
3148
3149 state->acquire_ctx = ctx;
3150 ret = __drm_atomic_helper_set_config(set, state);
3151 if (ret != 0)
3152 goto fail;
3153
3154 ret = handle_conflicting_encoders(state, true);
3155 if (ret)
3156 goto fail;
3157
3158 ret = drm_atomic_commit(state);
3159
3160 fail:
3161 drm_atomic_state_put(state);
3162 return ret;
3163 }
3164 EXPORT_SYMBOL(drm_atomic_helper_set_config);
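
/*
 * A minimal usage sketch (the foo_* name below is a placeholder, not part of
 * this file): atomic drivers typically wire this helper directly into their
 * &drm_crtc_funcs as the legacy SETCRTC entry point, next to the common
 * atomic state helpers:
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config		= drm_atomic_helper_set_config,
 *		.destroy		= drm_crtc_cleanup,
 *		.reset			= drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
 *	};
 */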

/**
 * drm_atomic_helper_disable_all - disable all currently active outputs
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Loops through all connectors, finding those that aren't turned off and then
 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
 * that they are connected to.
 *
 * This is used for example in suspend/resume to disable all currently active
 * outputs when suspending. If you just want to shut down everything at e.g.
 * driver unload, look at drm_atomic_helper_shutdown().
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
 * drm_atomic_helper_shutdown().
 */
int drm_atomic_helper_disable_all(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto free;
		}

		crtc_state->active = false;

		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret < 0)
			goto free;
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
		if (ret < 0)
			goto free;
	}

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret < 0)
			goto free;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	ret = drm_atomic_commit(state);
free:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
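
/*
 * A minimal usage sketch of the -EDEADLK handling mentioned above, for
 * callers that don't already hold all modeset locks (error handling other
 * than lock contention is omitted):
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	retry:
 *	ret = drm_atomic_helper_disable_all(dev, &ctx);
 *	if (ret == -EDEADLK) {
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */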

/**
 * drm_atomic_helper_shutdown - shut down all CRTCs
 * @dev: DRM device
 *
 * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
 * suspend should instead be handled with drm_atomic_helper_suspend(), since
 * that also takes a snapshot of the modeset state to be restored on resume.
 *
 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
 * and it is the atomic version of drm_crtc_force_disable_all().
 */
void drm_atomic_helper_shutdown(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	ret = drm_atomic_helper_disable_all(dev, &ctx);
	if (ret)
		drm_err(dev,
			"Disabling all crtc's during unload failed with %i\n",
			ret);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
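
/*
 * A minimal usage sketch (the foo_* names are placeholders, not part of this
 * file): a typical call site is the driver's remove callback, after the
 * device has been unregistered so no new userspace requests can arrive:
 *
 *	static void foo_platform_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *	}
 */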

/**
 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Makes a copy of the current atomic state by looping over all objects and
 * duplicating their respective states. This is used for example by suspend/
 * resume support code to save the state prior to suspend such that it can
 * be restored upon resume.
 *
 * Note that this treats atomic state as persistent between save and restore.
 * Drivers must make sure that this is possible and won't result in confusion
 * or erroneous behaviour.
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * A pointer to the copy of the atomic state object on success or an
 * ERR_PTR()-encoded error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
 */
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return ERR_PTR(-ENOMEM);

	state->acquire_ctx = ctx;
	state->duplicated = true;

	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto free;
		}
	}

	drm_for_each_plane(plane, dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			err = PTR_ERR(plane_state);
			goto free;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			drm_connector_list_iter_end(&conn_iter);
			goto free;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* clear the acquire context so that it isn't accidentally reused */
	state->acquire_ctx = NULL;

free:
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
	}

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);

/**
 * drm_atomic_helper_suspend - subsystem-level suspend helper
 * @dev: DRM device
 *
 * Duplicates the current atomic state, disables all active outputs and then
 * returns a pointer to the original atomic state to the caller. Drivers can
 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
 * restore the output configuration that was active at the time the system
 * entered suspend.
 *
 * Note that it is potentially unsafe to use this. The atomic state object
 * returned by this function is assumed to be persistent. Drivers must ensure
 * that this holds true. Before calling this function, drivers must make sure
 * to suspend fbdev emulation so that nothing can be using the device.
 *
 * Returns:
 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
 * encoded error code on failure. Drivers should store the returned atomic
 * state object and pass it to the drm_atomic_helper_resume() helper upon
 * resume.
 *
 * See also:
 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
 */
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int err;

	/* This can never be returned, but it makes the compiler happy */
	state = ERR_PTR(-EINVAL);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (IS_ERR(state))
		goto unlock;

	err = drm_atomic_helper_disable_all(dev, &ctx);
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
		goto unlock;
	}

unlock:
	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	if (err)
		return ERR_PTR(err);

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);
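
/*
 * A minimal usage sketch (the foo_* names and the suspend_state member are
 * placeholders, not part of this file): a driver's system-suspend hook stores
 * the returned state so the matching resume hook can hand it back to
 * drm_atomic_helper_resume():
 *
 *	static int foo_pm_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		foo->suspend_state = drm_atomic_helper_suspend(&foo->drm);
 *		if (IS_ERR(foo->suspend_state))
 *			return PTR_ERR(foo->suspend_state);
 *
 *		return 0;
 *	}
 */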

/**
 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
 * @state: duplicated atomic state to commit
 * @ctx: pointer to acquire_ctx to use for commit.
 *
 * The state returned by drm_atomic_helper_duplicate_state() and
 * drm_atomic_helper_suspend() is partially invalid, and needs to
 * be fixed up before commit.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
					      struct drm_modeset_acquire_ctx *ctx)
{
	int i, ret;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;

	state->acquire_ctx = ctx;

	for_each_new_plane_in_state(state, plane, new_plane_state, i)
		state->planes[i].old_state = plane->state;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		state->crtcs[i].old_state = crtc->state;

	for_each_new_connector_in_state(state, connector, new_conn_state, i)
		state->connectors[i].old_state = connector->state;

	ret = drm_atomic_commit(state);

	state->acquire_ctx = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);

/**
 * drm_atomic_helper_resume - subsystem-level resume helper
 * @dev: DRM device
 * @state: atomic state to resume to
 *
 * Calls drm_mode_config_reset() to synchronize hardware and software states,
 * grabs all modeset locks and commits the atomic state object. This can be
 * used in conjunction with the drm_atomic_helper_suspend() helper to
 * implement suspend/resume for drivers that support atomic mode-setting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_resume(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_mode_config_reset(dev);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	drm_atomic_state_put(state);

	return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);
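
/*
 * A minimal usage sketch, continuing the suspend example above (the foo_*
 * names are placeholders, not part of this file): the resume hook hands the
 * saved state back to this helper, which also drops the reference taken on
 * it in drm_atomic_helper_suspend():
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		return drm_atomic_helper_resume(&foo->drm, foo->suspend_state);
 *	}
 */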

static int page_flip_common(struct drm_atomic_state *state,
			    struct drm_crtc *crtc,
			    struct drm_framebuffer *fb,
			    struct drm_pending_vblank_event *event,
			    uint32_t flags)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->event = event;
	crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* Make sure we don't accidentally do a full modeset. */
	state->allow_modeset = false;
	if (!crtc_state->active) {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] disabled, rejecting legacy flip\n",
			       crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return ret;
}

/**
 * drm_atomic_helper_page_flip - execute a legacy page flip
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip implementation
 * using the atomic driver interface.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 *
 * See also:
 * drm_atomic_helper_page_flip_target()
 */
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);

/**
 * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @target: target vblank period when the flip should take effect
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
 * Similar to drm_atomic_helper_page_flip() with an extra parameter to specify
 * the target vblank period when the flip should take effect.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 */
int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
				       struct drm_framebuffer *fb,
				       struct drm_pending_vblank_event *event,
				       uint32_t flags,
				       uint32_t target,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (WARN_ON(!crtc_state)) {
		ret = -EINVAL;
		goto fail;
	}
	crtc_state->target_vblank = target;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
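
/*
 * A minimal usage sketch (the foo_* name is a placeholder, not part of this
 * file): both flip helpers plug directly into &drm_crtc_funcs;
 * .page_flip_target is only needed by drivers that want to expose
 * DRM_CAP_PAGE_FLIP_TARGET:
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		...
 *		.page_flip		= drm_atomic_helper_page_flip,
 *		.page_flip_target	= drm_atomic_helper_page_flip_target,
 *	};
 */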

/**
 * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
 * the input end of a bridge
 * @bridge: bridge control structure
 * @bridge_state: new bridge state
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 * @output_fmt: tested output bus format
 * @num_input_fmts: will contain the size of the returned array
 *
 * This helper is a pluggable implementation of the
 * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
 * modify the bus configuration between their input and their output. It
 * returns an array of input formats with a single element set to @output_fmt.
 *
 * RETURNS:
 * a valid format array of size @num_input_fmts, or NULL if the allocation
 * failed
 */
u32 *
drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
					   struct drm_bridge_state *bridge_state,
					   struct drm_crtc_state *crtc_state,
					   struct drm_connector_state *conn_state,
					   u32 output_fmt,
					   unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts) {
		*num_input_fmts = 0;
		return NULL;
	}

	*num_input_fmts = 1;
	input_fmts[0] = output_fmt;
	return input_fmts;
}
EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
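
/*
 * A minimal usage sketch (the foo_* name is a placeholder, not part of this
 * file): a transparent bridge that does not convert the bus format can use
 * this helper together with the generic bridge state helpers:
 *
 *	static const struct drm_bridge_funcs foo_bridge_funcs = {
 *		.atomic_reset		= drm_atomic_helper_bridge_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
 *		.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
 *	};
 */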