1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 #include <linux/dma-resv.h>
4 #include <linux/dma-fence-chain.h>
5
6 #include <drm/drm_atomic_state_helper.h>
7 #include <drm/drm_atomic_uapi.h>
8 #include <drm/drm_framebuffer.h>
9 #include <drm/drm_gem.h>
10 #include <drm/drm_gem_atomic_helper.h>
11 #include <drm/drm_gem_framebuffer_helper.h>
12 #include <drm/drm_simple_kms_helper.h>
13
14 #include "drm_internal.h"
15
16 /**
17 * DOC: overview
18 *
19 * The GEM atomic helpers library implements generic atomic-commit
20 * functions for drivers that use GEM objects. Currently, it provides
21 * synchronization helpers, and plane state and framebuffer BO mappings
22 * for planes with shadow buffers.
23 *
24 * Before scanout, a plane's framebuffer needs to be synchronized with
25 * possible writers that draw into the framebuffer. All drivers should
 * call drm_gem_plane_helper_prepare_fb() from their implementation of
 * struct &drm_plane_helper_funcs.prepare_fb. It sets the plane's fence from
 * the framebuffer so that the DRM core can synchronize access automatically.
29 *
30 * drm_gem_plane_helper_prepare_fb() can also be used directly as
31 * implementation of prepare_fb. For drivers based on
32 * struct drm_simple_display_pipe, drm_gem_simple_display_pipe_prepare_fb()
33 * provides equivalent functionality.
34 *
35 * .. code-block:: c
36 *
37 * #include <drm/drm_gem_atomic_helper.h>
38 *
39 * struct drm_plane_helper_funcs driver_plane_helper_funcs = {
40 * ...,
 *		.prepare_fb = drm_gem_plane_helper_prepare_fb,
42 * };
43 *
44 * struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
45 * ...,
 *		.prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
47 * };
48 *
49 * A driver using a shadow buffer copies the content of the shadow buffers
50 * into the HW's framebuffer memory during an atomic update. This requires
51 * a mapping of the shadow buffer into kernel address space. The mappings
52 * cannot be established by commit-tail functions, such as atomic_update,
53 * as this would violate locking rules around dma_buf_vmap().
54 *
55 * The helpers for shadow-buffered planes establish and release mappings,
56 * and provide struct drm_shadow_plane_state, which stores the plane's mapping
57 * for commit-tail functions.
58 *
59 * Shadow-buffered planes can easily be enabled by using the provided macros
60 * %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS.
61 * These macros set up the plane and plane-helper callbacks to point to the
62 * shadow-buffer helpers.
63 *
64 * .. code-block:: c
65 *
66 * #include <drm/drm_gem_atomic_helper.h>
67 *
68 * struct drm_plane_funcs driver_plane_funcs = {
69 * ...,
70 * DRM_GEM_SHADOW_PLANE_FUNCS,
71 * };
72 *
73 * struct drm_plane_helper_funcs driver_plane_helper_funcs = {
74 * ...,
75 * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
76 * };
77 *
78 * In the driver's atomic-update function, shadow-buffer mappings are available
79 * from the plane state. Use to_drm_shadow_plane_state() to upcast from
80 * struct drm_plane_state.
81 *
82 * .. code-block:: c
83 *
84 * void driver_plane_atomic_update(struct drm_plane *plane,
85 * struct drm_plane_state *old_plane_state)
86 * {
87 * struct drm_plane_state *plane_state = plane->state;
88 * struct drm_shadow_plane_state *shadow_plane_state =
89 * to_drm_shadow_plane_state(plane_state);
90 *
91 * // access shadow buffer via shadow_plane_state->map
92 * }
93 *
94 * A mapping address for each of the framebuffer's buffer object is stored in
95 * struct &drm_shadow_plane_state.map. The mappings are valid while the state
96 * is being used.
97 *
98 * Drivers that use struct drm_simple_display_pipe can use
 * %DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS to initialize the respective
100 * callbacks. Access to shadow-buffer mappings is similar to regular
101 * atomic_update.
102 *
103 * .. code-block:: c
104 *
105 * struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
106 * ...,
107 * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
108 * };
109 *
110 * void driver_pipe_enable(struct drm_simple_display_pipe *pipe,
111 * struct drm_crtc_state *crtc_state,
112 * struct drm_plane_state *plane_state)
113 * {
114 * struct drm_shadow_plane_state *shadow_plane_state =
115 * to_drm_shadow_plane_state(plane_state);
116 *
117 * // access shadow buffer via shadow_plane_state->map
118 * }
119 */
120
121 /*
122 * Plane Helpers
123 */
124
125 /**
126 * drm_gem_plane_helper_prepare_fb() - Prepare a GEM backed framebuffer
127 * @plane: Plane
128 * @state: Plane state the fence will be attached to
129 *
130 * This function extracts the exclusive fence from &drm_gem_object.resv and
131 * attaches it to plane state for the atomic helper to wait on. This is
132 * necessary to correctly implement implicit synchronization for any buffers
133 * shared as a struct &dma_buf. This function can be used as the
134 * &drm_plane_helper_funcs.prepare_fb callback.
135 *
136 * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
137 * GEM based framebuffer drivers which have their buffers always pinned in
138 * memory.
139 *
140 * This function is the default implementation for GEM drivers of
141 * &drm_plane_helper_funcs.prepare_fb if no callback is provided.
142 */
drm_gem_plane_helper_prepare_fb(struct drm_plane * plane,struct drm_plane_state * state)143 int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
144 struct drm_plane_state *state)
145 {
146 struct dma_fence *fence = dma_fence_get(state->fence);
147 enum dma_resv_usage usage;
148 size_t i;
149 int ret;
150
151 if (!state->fb)
152 return 0;
153
154 /*
155 * Only add the kernel fences here if there is already a fence set via
156 * explicit fencing interfaces on the atomic ioctl.
157 *
158 * This way explicit fencing can be used to overrule implicit fencing,
159 * which is important to make explicit fencing use-cases work: One
160 * example is using one buffer for 2 screens with different refresh
161 * rates. Implicit fencing will clamp rendering to the refresh rate of
162 * the slower screen, whereas explicit fence allows 2 independent
163 * render and display loops on a single buffer. If a driver allows
164 * obeys both implicit and explicit fences for plane updates, then it
165 * will break all the benefits of explicit fencing.
166 */
167 usage = fence ? DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_WRITE;
168
169 for (i = 0; i < state->fb->format->num_planes; ++i) {
170 struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i);
171 struct dma_fence *new;
172
173 if (!obj) {
174 ret = -EINVAL;
175 goto error;
176 }
177
178 ret = dma_resv_get_singleton(obj->resv, usage, &new);
179 if (ret)
180 goto error;
181
182 if (new && fence) {
183 struct dma_fence_chain *chain = dma_fence_chain_alloc();
184
185 if (!chain) {
186 ret = -ENOMEM;
187 goto error;
188 }
189
190 dma_fence_chain_init(chain, fence, new, 1);
191 fence = &chain->base;
192
193 } else if (new) {
194 fence = new;
195 }
196 }
197
198 dma_fence_put(state->fence);
199 state->fence = fence;
200 return 0;
201
202 error:
203 dma_fence_put(fence);
204 return ret;
205 }
206 EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
207
208 /**
209 * drm_gem_simple_display_pipe_prepare_fb - prepare_fb helper for &drm_simple_display_pipe
210 * @pipe: Simple display pipe
211 * @plane_state: Plane state
212 *
213 * This function uses drm_gem_plane_helper_prepare_fb() to extract the fences
214 * from &drm_gem_object.resv and attaches them to the plane state for the atomic
215 * helper to wait on. This is necessary to correctly implement implicit
216 * synchronization for any buffers shared as a struct &dma_buf. Drivers can use
217 * this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
218 *
219 * See drm_gem_plane_helper_prepare_fb() for a discussion of implicit and
220 * explicit fencing in atomic modeset updates.
221 */
drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe * pipe,struct drm_plane_state * plane_state)222 int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
223 struct drm_plane_state *plane_state)
224 {
225 return drm_gem_plane_helper_prepare_fb(&pipe->plane, plane_state);
226 }
227 EXPORT_SYMBOL(drm_gem_simple_display_pipe_prepare_fb);
228
229 /*
230 * Shadow-buffered Planes
231 */
232
233 /**
234 * __drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
235 * @plane: the plane
236 * @new_shadow_plane_state: the new shadow-buffered plane state
237 *
238 * This function duplicates shadow-buffered plane state. This is helpful for drivers
239 * that subclass struct drm_shadow_plane_state.
240 *
241 * The function does not duplicate existing mappings of the shadow buffers.
242 * Mappings are maintained during the atomic commit by the plane's prepare_fb
243 * and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb()
244 * for corresponding helpers.
245 */
246 void
__drm_gem_duplicate_shadow_plane_state(struct drm_plane * plane,struct drm_shadow_plane_state * new_shadow_plane_state)247 __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
248 struct drm_shadow_plane_state *new_shadow_plane_state)
249 {
250 __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
251 }
252 EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
253
254 /**
255 * drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
256 * @plane: the plane
257 *
258 * This function implements struct &drm_plane_funcs.atomic_duplicate_state for
259 * shadow-buffered planes. It assumes the existing state to be of type
260 * struct drm_shadow_plane_state and it allocates the new state to be of this
261 * type.
262 *
263 * The function does not duplicate existing mappings of the shadow buffers.
264 * Mappings are maintained during the atomic commit by the plane's prepare_fb
265 * and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb()
266 * for corresponding helpers.
267 *
268 * Returns:
269 * A pointer to a new plane state on success, or NULL otherwise.
270 */
271 struct drm_plane_state *
drm_gem_duplicate_shadow_plane_state(struct drm_plane * plane)272 drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane)
273 {
274 struct drm_plane_state *plane_state = plane->state;
275 struct drm_shadow_plane_state *new_shadow_plane_state;
276
277 if (!plane_state)
278 return NULL;
279
280 new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL);
281 if (!new_shadow_plane_state)
282 return NULL;
283 __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
284
285 return &new_shadow_plane_state->base;
286 }
287 EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);
288
289 /**
290 * __drm_gem_destroy_shadow_plane_state - cleans up shadow-buffered plane state
291 * @shadow_plane_state: the shadow-buffered plane state
292 *
293 * This function cleans up shadow-buffered plane state. Helpful for drivers that
294 * subclass struct drm_shadow_plane_state.
295 */
__drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state * shadow_plane_state)296 void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state)
297 {
298 __drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
299 }
300 EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state);
301
302 /**
303 * drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state
304 * @plane: the plane
305 * @plane_state: the plane state of type struct drm_shadow_plane_state
306 *
307 * This function implements struct &drm_plane_funcs.atomic_destroy_state
308 * for shadow-buffered planes. It expects that mappings of shadow buffers
309 * have been released already.
310 */
drm_gem_destroy_shadow_plane_state(struct drm_plane * plane,struct drm_plane_state * plane_state)311 void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
312 struct drm_plane_state *plane_state)
313 {
314 struct drm_shadow_plane_state *shadow_plane_state =
315 to_drm_shadow_plane_state(plane_state);
316
317 __drm_gem_destroy_shadow_plane_state(shadow_plane_state);
318 kfree(shadow_plane_state);
319 }
320 EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
321
322 /**
323 * __drm_gem_reset_shadow_plane - resets a shadow-buffered plane
324 * @plane: the plane
325 * @shadow_plane_state: the shadow-buffered plane state
326 *
327 * This function resets state for shadow-buffered planes. Helpful
328 * for drivers that subclass struct drm_shadow_plane_state.
329 */
__drm_gem_reset_shadow_plane(struct drm_plane * plane,struct drm_shadow_plane_state * shadow_plane_state)330 void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
331 struct drm_shadow_plane_state *shadow_plane_state)
332 {
333 __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
334 }
335 EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
336
337 /**
338 * drm_gem_reset_shadow_plane - resets a shadow-buffered plane
339 * @plane: the plane
340 *
341 * This function implements struct &drm_plane_funcs.reset_plane for
342 * shadow-buffered planes. It assumes the current plane state to be
343 * of type struct drm_shadow_plane and it allocates the new state of
344 * this type.
345 */
drm_gem_reset_shadow_plane(struct drm_plane * plane)346 void drm_gem_reset_shadow_plane(struct drm_plane *plane)
347 {
348 struct drm_shadow_plane_state *shadow_plane_state;
349
350 if (plane->state) {
351 drm_gem_destroy_shadow_plane_state(plane, plane->state);
352 plane->state = NULL; /* must be set to NULL here */
353 }
354
355 shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL);
356 if (!shadow_plane_state)
357 return;
358 __drm_gem_reset_shadow_plane(plane, shadow_plane_state);
359 }
360 EXPORT_SYMBOL(drm_gem_reset_shadow_plane);
361
362 /**
363 * drm_gem_prepare_shadow_fb - prepares shadow framebuffers
364 * @plane: the plane
365 * @plane_state: the plane state of type struct drm_shadow_plane_state
366 *
367 * This function implements struct &drm_plane_helper_funcs.prepare_fb. It
368 * maps all buffer objects of the plane's framebuffer into kernel address
369 * space and stores them in &struct drm_shadow_plane_state.map. The
370 * framebuffer will be synchronized as part of the atomic commit.
371 *
372 * See drm_gem_cleanup_shadow_fb() for cleanup.
373 *
374 * Returns:
375 * 0 on success, or a negative errno code otherwise.
376 */
drm_gem_prepare_shadow_fb(struct drm_plane * plane,struct drm_plane_state * plane_state)377 int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
378 {
379 struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
380 struct drm_framebuffer *fb = plane_state->fb;
381 int ret;
382
383 if (!fb)
384 return 0;
385
386 ret = drm_gem_plane_helper_prepare_fb(plane, plane_state);
387 if (ret)
388 return ret;
389
390 return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
391 }
392 EXPORT_SYMBOL(drm_gem_prepare_shadow_fb);
393
394 /**
395 * drm_gem_cleanup_shadow_fb - releases shadow framebuffers
396 * @plane: the plane
397 * @plane_state: the plane state of type struct drm_shadow_plane_state
398 *
399 * This function implements struct &drm_plane_helper_funcs.cleanup_fb.
400 * This function unmaps all buffer objects of the plane's framebuffer.
401 *
402 * See drm_gem_prepare_shadow_fb() for more information.
403 */
drm_gem_cleanup_shadow_fb(struct drm_plane * plane,struct drm_plane_state * plane_state)404 void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
405 {
406 struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
407 struct drm_framebuffer *fb = plane_state->fb;
408
409 if (!fb)
410 return;
411
412 drm_gem_fb_vunmap(fb, shadow_plane_state->map);
413 }
414 EXPORT_SYMBOL(drm_gem_cleanup_shadow_fb);
415
416 /**
417 * drm_gem_simple_kms_prepare_shadow_fb - prepares shadow framebuffers
418 * @pipe: the simple display pipe
419 * @plane_state: the plane state of type struct drm_shadow_plane_state
420 *
421 * This function implements struct drm_simple_display_funcs.prepare_fb. It
422 * maps all buffer objects of the plane's framebuffer into kernel address
423 * space and stores them in struct drm_shadow_plane_state.map. The
424 * framebuffer will be synchronized as part of the atomic commit.
425 *
426 * See drm_gem_simple_kms_cleanup_shadow_fb() for cleanup.
427 *
428 * Returns:
429 * 0 on success, or a negative errno code otherwise.
430 */
drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe * pipe,struct drm_plane_state * plane_state)431 int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
432 struct drm_plane_state *plane_state)
433 {
434 return drm_gem_prepare_shadow_fb(&pipe->plane, plane_state);
435 }
436 EXPORT_SYMBOL(drm_gem_simple_kms_prepare_shadow_fb);
437
438 /**
439 * drm_gem_simple_kms_cleanup_shadow_fb - releases shadow framebuffers
440 * @pipe: the simple display pipe
441 * @plane_state: the plane state of type struct drm_shadow_plane_state
442 *
443 * This function implements struct drm_simple_display_funcs.cleanup_fb.
444 * This function unmaps all buffer objects of the plane's framebuffer.
445 *
446 * See drm_gem_simple_kms_prepare_shadow_fb().
447 */
drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe * pipe,struct drm_plane_state * plane_state)448 void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
449 struct drm_plane_state *plane_state)
450 {
451 drm_gem_cleanup_shadow_fb(&pipe->plane, plane_state);
452 }
453 EXPORT_SYMBOL(drm_gem_simple_kms_cleanup_shadow_fb);
454
455 /**
456 * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
457 * @pipe: the simple display pipe
458 *
459 * This function implements struct drm_simple_display_funcs.reset_plane
460 * for shadow-buffered planes.
461 */
drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe * pipe)462 void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe)
463 {
464 drm_gem_reset_shadow_plane(&pipe->plane);
465 }
466 EXPORT_SYMBOL(drm_gem_simple_kms_reset_shadow_plane);
467
468 /**
469 * drm_gem_simple_kms_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
470 * @pipe: the simple display pipe
471 *
472 * This function implements struct drm_simple_display_funcs.duplicate_plane_state
473 * for shadow-buffered planes. It does not duplicate existing mappings of the shadow
474 * buffers. Mappings are maintained during the atomic commit by the plane's prepare_fb
475 * and cleanup_fb helpers.
476 *
477 * Returns:
478 * A pointer to a new plane state on success, or NULL otherwise.
479 */
480 struct drm_plane_state *
drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe * pipe)481 drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe)
482 {
483 return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
484 }
485 EXPORT_SYMBOL(drm_gem_simple_kms_duplicate_shadow_plane_state);
486
487 /**
488 * drm_gem_simple_kms_destroy_shadow_plane_state - resets shadow-buffered plane state
489 * @pipe: the simple display pipe
490 * @plane_state: the plane state of type struct drm_shadow_plane_state
491 *
492 * This function implements struct drm_simple_display_funcs.destroy_plane_state
493 * for shadow-buffered planes. It expects that mappings of shadow buffers
494 * have been released already.
495 */
drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe * pipe,struct drm_plane_state * plane_state)496 void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
497 struct drm_plane_state *plane_state)
498 {
499 drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
500 }
501 EXPORT_SYMBOL(drm_gem_simple_kms_destroy_shadow_plane_state);
502