/**********************************************************
 * Copyright 2021 VMware, Inc.
 * SPDX-License-Identifier: GPL-2.0 OR MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#ifndef VMW_SURFACE_CACHE_H
#define VMW_SURFACE_CACHE_H

#include "device_include/svga3d_surfacedefs.h"

#include <drm/vmwgfx_drm.h>

static inline u32 clamped_umul32(u32 a, u32 b)
{
	uint64_t tmp = (uint64_t) a*b;
	return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
}
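
/*
 * Example (illustrative only): clamped_umul32() saturates rather than
 * wrapping on overflow, so a product that no longer fits in 32 bits is
 * clamped to U32_MAX:
 *
 *	clamped_umul32(0x10000, 0x10000) == 0xffffffff  (0x100000000 clamped)
 *	clamped_umul32(1000, 1000)       == 1000000     (fits, returned as-is)
 */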

/**
 * vmw_surface_get_desc - Look up the appropriate SVGA3dSurfaceDesc for the
 * given format.
 */
static inline const SVGA3dSurfaceDesc *
vmw_surface_get_desc(SVGA3dSurfaceFormat format)
{
	if (format < ARRAY_SIZE(g_SVGA3dSurfaceDescs))
		return &g_SVGA3dSurfaceDescs[format];

	return &g_SVGA3dSurfaceDescs[SVGA3D_FORMAT_INVALID];
}

/**
 * vmw_surface_get_mip_size - Given a base level size and the mip level,
 * compute the size of the mip level.
 */
static inline struct drm_vmw_size
vmw_surface_get_mip_size(struct drm_vmw_size base_level, u32 mip_level)
{
	struct drm_vmw_size size = {
		.width = max_t(u32, base_level.width >> mip_level, 1),
		.height = max_t(u32, base_level.height >> mip_level, 1),
		.depth = max_t(u32, base_level.depth >> mip_level, 1)
	};

	return size;
}
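
/*
 * Example (illustrative only): for a 1024x512x1 base level,
 * vmw_surface_get_mip_size() yields 128x64x1 at mip level 3. Every
 * dimension is clamped to at least 1, so mip level 11 yields 1x1x1.
 */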

static inline void
vmw_surface_get_size_in_blocks(const SVGA3dSurfaceDesc *desc,
			       const struct drm_vmw_size *pixel_size,
			       SVGA3dSize *block_size)
{
	block_size->width = __KERNEL_DIV_ROUND_UP(pixel_size->width,
						  desc->blockSize.width);
	block_size->height = __KERNEL_DIV_ROUND_UP(pixel_size->height,
						   desc->blockSize.height);
	block_size->depth = __KERNEL_DIV_ROUND_UP(pixel_size->depth,
						  desc->blockSize.depth);
}

static inline bool
vmw_surface_is_planar_surface(const SVGA3dSurfaceDesc *desc)
{
	return (desc->blockDesc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
}

static inline u32
vmw_surface_calculate_pitch(const SVGA3dSurfaceDesc *desc,
			    const struct drm_vmw_size *size)
{
	u32 pitch;
	SVGA3dSize blocks;

	vmw_surface_get_size_in_blocks(desc, size, &blocks);

	pitch = blocks.width * desc->pitchBytesPerBlock;

	return pitch;
}
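
/*
 * Example (illustrative only, assuming the descriptor table defines
 * SVGA3D_A8R8G8B8 as 1x1 blocks of 4 bytes and SVGA3D_DXT1 as 4x4 blocks
 * of 8 bytes):
 *
 *	800 pixels wide, A8R8G8B8:  800 blocks * 4 bytes = 3200-byte pitch
 *	256 pixels wide, DXT1:       64 blocks * 8 bytes =  512-byte pitch
 */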

/**
 * vmw_surface_get_image_buffer_size - Calculates image buffer size.
 *
 * Return the number of bytes of buffer space required to store one image of a
 * surface, optionally using the specified pitch.
 *
 * If pitch is zero, it is assumed that rows are tightly packed.
 *
 * This function is overflow-safe. If the result would have overflowed, U32_MAX
 * is returned instead.
 */
static inline u32
vmw_surface_get_image_buffer_size(const SVGA3dSurfaceDesc *desc,
				  const struct drm_vmw_size *size,
				  u32 pitch)
{
	SVGA3dSize image_blocks;
	u32 slice_size, total_size;

	vmw_surface_get_size_in_blocks(desc, size, &image_blocks);

	if (vmw_surface_is_planar_surface(desc)) {
		total_size = clamped_umul32(image_blocks.width,
					    image_blocks.height);
		total_size = clamped_umul32(total_size, image_blocks.depth);
		total_size = clamped_umul32(total_size, desc->bytesPerBlock);
		return total_size;
	}

	if (pitch == 0)
		pitch = vmw_surface_calculate_pitch(desc, size);

	slice_size = clamped_umul32(image_blocks.height, pitch);
	total_size = clamped_umul32(slice_size, image_blocks.depth);

	return total_size;
}

/**
 * vmw_surface_get_serialized_size - Get the serialized size for the image.
 */
static inline u32
vmw_surface_get_serialized_size(SVGA3dSurfaceFormat format,
				struct drm_vmw_size base_level_size,
				u32 num_mip_levels,
				u32 num_layers)
{
	const SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
	u32 total_size = 0;
	u32 mip;

	for (mip = 0; mip < num_mip_levels; mip++) {
		struct drm_vmw_size size =
			vmw_surface_get_mip_size(base_level_size, mip);
		total_size += vmw_surface_get_image_buffer_size(desc,
								&size, 0);
	}

	return total_size * num_layers;
}
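
/*
 * Example (illustrative only, assuming SVGA3D_A8R8G8B8 is 4 bytes per
 * pixel): a 256x256x1 base level with 2 mip levels and 1 layer serializes
 * to 256*256*4 + 128*128*4 = 262144 + 65536 = 327680 bytes; the same chain
 * for a 6-face cubemap (num_layers == 6) needs 6 * 327680 = 1966080 bytes.
 */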

/**
 * vmw_surface_get_serialized_size_extended - Returns the number of bytes
 * required for a surface with the given parameters, including the sample
 * count.
 */
static inline u32
vmw_surface_get_serialized_size_extended(SVGA3dSurfaceFormat format,
					 struct drm_vmw_size base_level_size,
					 u32 num_mip_levels,
					 u32 num_layers,
					 u32 num_samples)
{
	uint64_t total_size =
		vmw_surface_get_serialized_size(format,
						base_level_size,
						num_mip_levels,
						num_layers);
	total_size *= max_t(u32, 1, num_samples);

	return min_t(uint64_t, total_size, (uint64_t)U32_MAX);
}

/**
 * vmw_surface_get_pixel_offset - Compute the offset (in bytes) to a pixel
 * in an image (or volume).
 *
 * @format: The surface format.
 * @width: The image width in pixels.
 * @height: The image height in pixels.
 * @x: X coordinate of the pixel.
 * @y: Y coordinate of the pixel.
 * @z: Z coordinate of the pixel.
 */
static inline u32
vmw_surface_get_pixel_offset(SVGA3dSurfaceFormat format,
			     u32 width, u32 height,
			     u32 x, u32 y, u32 z)
{
	const SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
	const u32 bw = desc->blockSize.width, bh = desc->blockSize.height;
	const u32 bd = desc->blockSize.depth;
	const u32 rowstride = __KERNEL_DIV_ROUND_UP(width, bw) *
			      desc->bytesPerBlock;
	const u32 imgstride = __KERNEL_DIV_ROUND_UP(height, bh) * rowstride;
	const u32 offset = (z / bd * imgstride +
			    y / bh * rowstride +
			    x / bw * desc->bytesPerBlock);
	return offset;
}
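
/*
 * Example (illustrative only, assuming a 4-byte, 1x1-block format such as
 * SVGA3D_A8R8G8B8): in a 256x256 image, rowstride is 256 * 4 = 1024 bytes,
 * so the pixel at (x = 10, y = 20, z = 0) lives at
 * 20 * 1024 + 10 * 4 = 20520 bytes from the start of the image.
 */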

static inline u32
vmw_surface_get_image_offset(SVGA3dSurfaceFormat format,
			     struct drm_vmw_size baseLevelSize,
			     u32 numMipLevels,
			     u32 face,
			     u32 mip)
{
	u32 offset;
	u32 mipChainBytes;
	u32 mipChainBytesToLevel;
	u32 i;
	const SVGA3dSurfaceDesc *desc;
	struct drm_vmw_size mipSize;
	u32 bytes;

	desc = vmw_surface_get_desc(format);

	mipChainBytes = 0;
	mipChainBytesToLevel = 0;
	for (i = 0; i < numMipLevels; i++) {
		mipSize = vmw_surface_get_mip_size(baseLevelSize, i);
		bytes = vmw_surface_get_image_buffer_size(desc, &mipSize, 0);
		mipChainBytes += bytes;
		if (i < mip)
			mipChainBytesToLevel += bytes;
	}

	offset = mipChainBytes * face + mipChainBytesToLevel;

	return offset;
}
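
/*
 * Example (illustrative only, continuing the 256x256 SVGA3D_A8R8G8B8
 * surface with 2 mip levels from above): mipChainBytes is 327680, so the
 * image at face 2, mip 1 starts at 2 * 327680 + 262144 = 917504 bytes.
 */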


/**
 * vmw_surface_is_gb_screen_target_format - Is the specified format usable as
 * a ScreenTarget? (with just the GBObjects cap-bit set)
 *
 * @format: format to be queried
 *
 * RETURNS:
 * true if the queried format is valid for screen targets
 */
static inline bool
vmw_surface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
{
	return (format == SVGA3D_X8R8G8B8 ||
		format == SVGA3D_A8R8G8B8 ||
		format == SVGA3D_R5G6B5 ||
		format == SVGA3D_X1R5G5B5 ||
		format == SVGA3D_A1R5G5B5 ||
		format == SVGA3D_P8);
}


/**
 * vmw_surface_is_dx_screen_target_format - Is the specified format usable as
 * a ScreenTarget? (with DX10 enabled)
 *
 * @format: format to be queried
 *
 * RETURNS:
 * true if the queried format is valid for screen targets
 */
static inline bool
vmw_surface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
{
	return (format == SVGA3D_R8G8B8A8_UNORM ||
		format == SVGA3D_B8G8R8A8_UNORM ||
		format == SVGA3D_B8G8R8X8_UNORM);
}


/**
 * vmw_surface_is_screen_target_format - Is the specified format usable as a
 * ScreenTarget? (for some combination of caps)
 *
 * @format: format to be queried
 *
 * RETURNS:
 * true if the queried format is valid for screen targets
 */
static inline bool
vmw_surface_is_screen_target_format(SVGA3dSurfaceFormat format)
{
	if (vmw_surface_is_gb_screen_target_format(format))
		return true;
	return vmw_surface_is_dx_screen_target_format(format);
}

/**
 * struct vmw_surface_mip - Mipmap level information
 * @bytes: Bytes required in the backing store of this mipmap level.
 * @img_stride: Byte stride per image.
 * @row_stride: Byte stride per block row.
 * @size: The size of the mipmap.
 */
struct vmw_surface_mip {
	size_t bytes;
	size_t img_stride;
	size_t row_stride;
	struct drm_vmw_size size;
};

/**
 * struct vmw_surface_cache - Cached surface information
 * @desc: Pointer to the surface descriptor
 * @mip: Array of mipmap level information. Valid size is @num_mip_levels.
 * @mip_chain_bytes: Bytes required in the backing store for the whole chain
 * of mip levels.
 * @sheet_bytes: Bytes required in the backing store for a sheet
 * representing a single sample.
 * @num_mip_levels: Valid size of the @mip array. Number of mipmap levels in
 * a chain.
 * @num_layers: Number of slices in an array texture or number of faces in
 * a cubemap texture.
 */
struct vmw_surface_cache {
	const SVGA3dSurfaceDesc *desc;
	struct vmw_surface_mip mip[DRM_VMW_MAX_MIP_LEVELS];
	size_t mip_chain_bytes;
	size_t sheet_bytes;
	u32 num_mip_levels;
	u32 num_layers;
};

/**
 * struct vmw_surface_loc - Surface location
 * @sheet: The multisample sheet.
 * @sub_resource: Surface subresource. Defined as layer * num_mip_levels +
 * mip_level.
 * @x: X coordinate.
 * @y: Y coordinate.
 * @z: Z coordinate.
 */
struct vmw_surface_loc {
	u32 sheet;
	u32 sub_resource;
	u32 x, y, z;
};

/**
 * vmw_surface_subres - Compute the subresource from layer and mipmap.
 * @cache: Surface layout data.
 * @mip_level: The mipmap level.
 * @layer: The surface layer (face or array slice).
 *
 * Return: The subresource.
 */
static inline u32 vmw_surface_subres(const struct vmw_surface_cache *cache,
				     u32 mip_level, u32 layer)
{
	return cache->num_mip_levels * layer + mip_level;
}

/**
 * vmw_surface_setup_cache - Build a surface cache entry
 * @size: The surface base level dimensions.
 * @format: The surface format.
 * @num_mip_levels: Number of mipmap levels.
 * @num_layers: Number of layers.
 * @num_samples: Number of samples.
 * @cache: Pointer to a struct vmw_surface_cache object to be filled in.
 *
 * Return: Zero on success, -EINVAL on invalid surface layout.
 */
static inline int vmw_surface_setup_cache(const struct drm_vmw_size *size,
					  SVGA3dSurfaceFormat format,
					  u32 num_mip_levels,
					  u32 num_layers,
					  u32 num_samples,
					  struct vmw_surface_cache *cache)
{
	const SVGA3dSurfaceDesc *desc;
	u32 i;

	memset(cache, 0, sizeof(*cache));
	cache->desc = desc = vmw_surface_get_desc(format);
	cache->num_mip_levels = num_mip_levels;
	cache->num_layers = num_layers;
	for (i = 0; i < cache->num_mip_levels; i++) {
		struct vmw_surface_mip *mip = &cache->mip[i];

		mip->size = vmw_surface_get_mip_size(*size, i);
		mip->bytes = vmw_surface_get_image_buffer_size
			(desc, &mip->size, 0);
		mip->row_stride =
			__KERNEL_DIV_ROUND_UP(mip->size.width,
					      desc->blockSize.width) *
			desc->bytesPerBlock * num_samples;
		if (!mip->row_stride)
			goto invalid_dim;

		mip->img_stride =
			__KERNEL_DIV_ROUND_UP(mip->size.height,
					      desc->blockSize.height) *
			mip->row_stride;
		if (!mip->img_stride)
			goto invalid_dim;

		cache->mip_chain_bytes += mip->bytes;
	}
	cache->sheet_bytes = cache->mip_chain_bytes * num_layers;
	if (!cache->sheet_bytes)
		goto invalid_dim;

	return 0;

invalid_dim:
	VMW_DEBUG_USER("Invalid surface layout for dirty tracking.\n");
	return -EINVAL;
}
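
/*
 * Usage sketch (illustrative only, assuming SVGA3D_A8R8G8B8 is a 4-byte,
 * 1x1-block format):
 *
 *	struct vmw_surface_cache cache;
 *	struct drm_vmw_size size = { .width = 256, .height = 256, .depth = 1 };
 *	int ret;
 *
 *	ret = vmw_surface_setup_cache(&size, SVGA3D_A8R8G8B8, 2, 1, 1, &cache);
 *	if (ret)
 *		return ret;	// -EINVAL for a degenerate layout
 *
 * With these parameters, cache.mip_chain_bytes and cache.sheet_bytes both
 * end up as 327680 (two mip levels, one layer, one sample).
 */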

/**
 * vmw_surface_get_loc - Get a surface location from an offset into the
 * backing store
 * @cache: Surface layout data.
 * @loc: Pointer to a struct vmw_surface_loc to be filled in.
 * @offset: Offset into the surface backing store.
 */
static inline void
vmw_surface_get_loc(const struct vmw_surface_cache *cache,
		    struct vmw_surface_loc *loc,
		    size_t offset)
{
	const struct vmw_surface_mip *mip = &cache->mip[0];
	const SVGA3dSurfaceDesc *desc = cache->desc;
	u32 layer;
	int i;

	loc->sheet = offset / cache->sheet_bytes;
	offset -= loc->sheet * cache->sheet_bytes;

	layer = offset / cache->mip_chain_bytes;
	offset -= layer * cache->mip_chain_bytes;
	for (i = 0; i < cache->num_mip_levels; ++i, ++mip) {
		if (mip->bytes > offset)
			break;
		offset -= mip->bytes;
	}

	loc->sub_resource = vmw_surface_subres(cache, i, layer);
	loc->z = offset / mip->img_stride;
	offset -= loc->z * mip->img_stride;
	loc->z *= desc->blockSize.depth;
	loc->y = offset / mip->row_stride;
	offset -= loc->y * mip->row_stride;
	loc->y *= desc->blockSize.height;
	loc->x = offset / desc->bytesPerBlock;
	loc->x *= desc->blockSize.width;
}
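
/*
 * Example (illustrative only): for a 64x64x1 SVGA3D_A8R8G8B8 surface with
 * one mip level, one layer and one sample, row_stride is 256 and
 * img_stride is 16384, so an offset of 1044 decodes to sheet 0,
 * sub_resource 0 and (x, y, z) = (5, 4, 0), since 1044 = 4 * 256 + 5 * 4.
 */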

/**
 * vmw_surface_inc_loc - Clamped increment of a surface location by one block
 * size in each dimension.
 * @cache: Surface layout data.
 * @loc: Pointer to a struct vmw_surface_loc to be incremented.
 *
 * When computing the size of a range as size = end - start, the range does not
 * include the end element. However a location representing the last byte
 * of a touched region in the backing store *is* included in the range.
 * This function modifies such a location to match the end definition
 * given as start + size, which is the one used in a SVGA3dBox.
 */
static inline void
vmw_surface_inc_loc(const struct vmw_surface_cache *cache,
		    struct vmw_surface_loc *loc)
{
	const SVGA3dSurfaceDesc *desc = cache->desc;
	u32 mip = loc->sub_resource % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;

	loc->sub_resource++;
	loc->x += desc->blockSize.width;
	if (loc->x > size->width)
		loc->x = size->width;
	loc->y += desc->blockSize.height;
	if (loc->y > size->height)
		loc->y = size->height;
	loc->z += desc->blockSize.depth;
	if (loc->z > size->depth)
		loc->z = size->depth;
}

/**
 * vmw_surface_min_loc - The start location in a subresource
 * @cache: Surface layout data.
 * @sub_resource: The subresource.
 * @loc: Pointer to a struct vmw_surface_loc to be filled in.
 */
static inline void
vmw_surface_min_loc(const struct vmw_surface_cache *cache,
		    u32 sub_resource,
		    struct vmw_surface_loc *loc)
{
	loc->sheet = 0;
	loc->sub_resource = sub_resource;
	loc->x = loc->y = loc->z = 0;
}

/**
 * vmw_surface_max_loc - The end location in a subresource
 * @cache: Surface layout data.
 * @sub_resource: The subresource.
 * @loc: Pointer to a struct vmw_surface_loc to be filled in.
 *
 * Following the end definition given in vmw_surface_inc_loc(),
 * compute the end location of a surface subresource.
 */
static inline void
vmw_surface_max_loc(const struct vmw_surface_cache *cache,
		    u32 sub_resource,
		    struct vmw_surface_loc *loc)
{
	const struct drm_vmw_size *size;
	u32 mip;

	loc->sheet = 0;
	loc->sub_resource = sub_resource + 1;
	mip = sub_resource % cache->num_mip_levels;
	size = &cache->mip[mip].size;
	loc->x = size->width;
	loc->y = size->height;
	loc->z = size->depth;
}
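
/*
 * Example (illustrative only): to describe a region covering an entire
 * subresource, pair the two helpers above; the end location is exclusive,
 * matching the start + size convention of an SVGA3dBox:
 *
 *	struct vmw_surface_loc start, end;
 *
 *	vmw_surface_min_loc(&cache, sub_res, &start);  // x = y = z = 0
 *	vmw_surface_max_loc(&cache, sub_res, &end);    // mip-level dimensions
 */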


#endif /* VMW_SURFACE_CACHE_H */