// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2017 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <linux/highmem.h>

/*
 * Template that implements find_first_diff() for a generic
 * unsigned integer type. @size and return value are in bytes.
 */
#define VMW_FIND_FIRST_DIFF(_type)			 \
static size_t vmw_find_first_diff_ ## _type		 \
	(const _type * dst, const _type * src, size_t size)\
{							 \
	size_t i;					 \
							 \
	for (i = 0; i < size; i += sizeof(_type)) {	 \
		if (*dst++ != *src++)			 \
			break;				 \
	}						 \
							 \
	return i;					 \
}


/*
 * Template that implements find_last_diff() for a generic
 * unsigned integer type. The pointers are expected to point just past
 * the *end* of the area to be examined. @size and the return value are
 * in bytes; the return value is the offset from the start of the area
 * to just past the last differing item, or zero if no difference was
 * found.
 */
#define VMW_FIND_LAST_DIFF(_type)					\
static ssize_t vmw_find_last_diff_ ## _type(				\
	const _type * dst, const _type * src, size_t size)		\
{									\
	while (size) {							\
		if (*--dst != *--src)					\
			break;						\
									\
		size -= sizeof(_type);					\
	}								\
	return size;							\
}


/*
 * Instantiate the find diff functions for the relevant unsigned integer
 * widths, assuming that accesses through wider integers are faster,
 * including the extra work needed for aligning, up to the architecture's
 * native width, which is assumed to be 32 bits unless CONFIG_64BIT is
 * defined.
 */
VMW_FIND_FIRST_DIFF(u8);
VMW_FIND_LAST_DIFF(u8);

VMW_FIND_FIRST_DIFF(u16);
VMW_FIND_LAST_DIFF(u16);

VMW_FIND_FIRST_DIFF(u32);
VMW_FIND_LAST_DIFF(u32);

#ifdef CONFIG_64BIT
VMW_FIND_FIRST_DIFF(u64);
VMW_FIND_LAST_DIFF(u64);
#endif

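/*
 * Illustrative example (not part of the driver logic): comparing
 * dst = "AAAA" with src = "AABA", vmw_find_first_diff_u8() returns 2,
 * the offset of the first differing byte. vmw_find_last_diff_u8(),
 * called with both pointers pointing just past the end of the areas,
 * returns 3, the offset just past the last differing byte, and would
 * return 0 for identical areas.
 */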

/* We use size aligned copies. This computes (addr - align(addr)) */
#define SPILL(_var, _type) ((unsigned long) _var & (sizeof(_type) - 1))
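
/*
 * Example: SPILL(0x1003, u32) == 3; the address is three bytes past
 * the previous u32 boundary (0x1000), so sizeof(u32) - 3 == 1 byte
 * must be handled with byte accesses before u32-aligned accesses can
 * start at 0x1004.
 */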


/*
 * Template to compute find_first_diff() for a certain integer type,
 * including a byte-wise head find to reach alignment, and adjustment of
 * the parameters for a tail find or an increased resolution find using
 * an unsigned integer find of smaller width. If the find is complete
 * and the resolution is sufficient, the macro executes a return
 * statement. Otherwise it falls through.
 */
#define VMW_TRY_FIND_FIRST_DIFF(_type)					\
do {									\
	unsigned int spill = SPILL(dst, _type);				\
	size_t diff_offs;						\
									\
	if (spill && spill == SPILL(src, _type) &&			\
	    sizeof(_type) - spill <= size) {				\
		spill = sizeof(_type) - spill;				\
		diff_offs = vmw_find_first_diff_u8(dst, src, spill);	\
		if (diff_offs < spill)					\
			return round_down(offset + diff_offs, granularity); \
									\
		dst += spill;						\
		src += spill;						\
		size -= spill;						\
		offset += spill;					\
		spill = 0;						\
	}								\
	if (!spill && !SPILL(src, _type)) {				\
		size_t to_copy = size & ~(sizeof(_type) - 1);		\
									\
		diff_offs = vmw_find_first_diff_ ## _type		\
			((_type *) dst, (_type *) src, to_copy);	\
		if (diff_offs >= size || granularity == sizeof(_type))	\
			return (offset + diff_offs);			\
									\
		dst += diff_offs;					\
		src += diff_offs;					\
		size -= diff_offs;					\
		offset += diff_offs;					\
	}								\
} while (0)
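
/*
 * Note that the template above is wrapped in do/while (0) so it can be
 * used as a single statement; it reads and updates the enclosing
 * function's local variables dst, src, size and offset in place.
 */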


/**
 * vmw_find_first_diff - find the first difference between dst and src
 *
 * @dst: The destination address
 * @src: The source address
 * @size: Number of bytes to compare
 * @granularity: The granularity needed for the return value in bytes.
 * Return: The offset from find start where the first difference was
 * encountered in bytes. If no difference was found, the function returns
 * a value >= @size.
 */
static size_t vmw_find_first_diff(const u8 *dst, const u8 *src, size_t size,
				  size_t granularity)
{
	size_t offset = 0;

	/*
	 * Try finding with large integers if alignment allows, or we can
	 * fix it. Fall through if we need better resolution or alignment
	 * was bad.
	 */
#ifdef CONFIG_64BIT
	VMW_TRY_FIND_FIRST_DIFF(u64);
#endif
	VMW_TRY_FIND_FIRST_DIFF(u32);
	VMW_TRY_FIND_FIRST_DIFF(u16);

	return round_down(offset + vmw_find_first_diff_u8(dst, src, size),
			  granularity);
}
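
/*
 * On a 64-bit kernel the function above thus tries u64-wide finds first
 * and successively narrows to u32, u16 and finally single bytes,
 * returning early from a wider pass whenever its width already matches
 * the requested @granularity.
 */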


/*
 * Template to compute find_last_diff() for a certain integer type,
 * including a byte-wise tail find to reach alignment, and adjustment of
 * the parameters for a head find or an increased resolution find using
 * an unsigned integer find of smaller width. If the find is complete
 * and the resolution is sufficient, the macro executes a return
 * statement. Otherwise it falls through.
 */
#define VMW_TRY_FIND_LAST_DIFF(_type)					\
do {									\
	unsigned int spill = SPILL(dst, _type);				\
	ssize_t location;						\
	ssize_t diff_offs;						\
									\
	if (spill && spill <= size && spill == SPILL(src, _type)) {	\
		diff_offs = vmw_find_last_diff_u8(dst, src, spill);	\
		if (diff_offs) {					\
			location = size - spill + diff_offs - 1;	\
			return round_down(location, granularity);	\
		}							\
									\
		dst -= spill;						\
		src -= spill;						\
		size -= spill;						\
		spill = 0;						\
	}								\
	if (!spill && !SPILL(src, _type)) {				\
		size_t to_copy = round_down(size, sizeof(_type));	\
									\
		diff_offs = vmw_find_last_diff_ ## _type		\
			((_type *) dst, (_type *) src, to_copy);	\
		location = size - to_copy + diff_offs - sizeof(_type);	\
		if (location < 0 || granularity == sizeof(_type))	\
			return location;				\
									\
		dst -= to_copy - diff_offs;				\
		src -= to_copy - diff_offs;				\
		size -= to_copy - diff_offs;				\
	}								\
} while (0)
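
/*
 * Unlike the head template above, this one walks backwards: @dst and
 * @src are expected to point just past the end of the area and are
 * decremented as the find proceeds, and a negative location signals
 * that no difference was found.
 */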


/**
 * vmw_find_last_diff - find the last difference between dst and src
 *
 * @dst: The destination address
 * @src: The source address
 * @size: Number of bytes to compare
 * @granularity: The granularity needed for the return value in bytes.
 * Return: The offset from find start where the last difference was
 * encountered in bytes, or a negative value if no difference was found.
 */
static ssize_t vmw_find_last_diff(const u8 *dst, const u8 *src, size_t size,
				  size_t granularity)
{
	dst += size;
	src += size;

#ifdef CONFIG_64BIT
	VMW_TRY_FIND_LAST_DIFF(u64);
#endif
	VMW_TRY_FIND_LAST_DIFF(u32);
	VMW_TRY_FIND_LAST_DIFF(u16);

	return round_down(vmw_find_last_diff_u8(dst, src, size) - 1,
			  granularity);
}


/**
 * vmw_memcpy - A wrapper around kernel memcpy that allows it to be
 * plugged into a struct vmw_diff_cpy.
 *
 * @diff: The struct vmw_diff_cpy closure argument (unused).
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 */
void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n)
{
	memcpy(dest, src, n);
}
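
/*
 * Hypothetical usage sketch (not taken from this file): a plain copy
 * with no difference tracking could be set up as
 *
 *	struct vmw_diff_cpy d = { .do_cpy = vmw_memcpy };
 *
 *	ret = vmw_bo_cpu_blit(dst_bo, 0, stride, src_bo, 0, stride,
 *			      width_in_bytes, height, &d);
 *
 * where dst_bo and src_bo are assumed to be suitably reserved
 * ttm_buffer_objects.
 */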


/**
 * vmw_adjust_rect - Adjust rectangle coordinates for newly found difference
 *
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * @diff_offs: The offset from @diff->line_offset where the difference was
 * found.
 */
static void vmw_adjust_rect(struct vmw_diff_cpy *diff, size_t diff_offs)
{
	size_t offs = (diff_offs + diff->line_offset) / diff->cpp;
	struct drm_rect *rect = &diff->rect;

	rect->x1 = min_t(int, rect->x1, offs);
	rect->x2 = max_t(int, rect->x2, offs + 1);
	rect->y1 = min_t(int, rect->y1, diff->line);
	rect->y2 = max_t(int, rect->y2, diff->line + 1);
}
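
/*
 * Note that the rectangle above is tracked in pixel units while
 * @diff_offs and @diff->line_offset are byte offsets, hence the
 * division by @diff->cpp.
 */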

/**
 * vmw_diff_memcpy - memcpy that creates a bounding box of modified content.
 *
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 *
 * In order to correctly track the modified content, the field @diff->line
 * must be pre-loaded with the current line number, the field
 * @diff->line_offset must be pre-loaded with the line offset in bytes where
 * the copy starts, and finally the field @diff->cpp needs to be pre-loaded
 * with the number of bytes per unit in the horizontal direction of the area
 * we're examining, typically bytes per pixel. This determines the
 * granularity of the difference-computing operations: a higher cpp
 * generally leads to faster execution at the cost of bounding box width
 * precision.
 */
void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n)
{
	ssize_t csize, byte_len;

	if (WARN_ON_ONCE(round_down(n, diff->cpp) != n))
		return;

	/* TODO: Possibly use a single vmw_find_first_diff per line? */
	csize = vmw_find_first_diff(dest, src, n, diff->cpp);
	if (csize < n) {
		vmw_adjust_rect(diff, csize);
		byte_len = diff->cpp;

		/*
		 * Starting from where the first difference was found, find
		 * the location of the last difference, and then copy.
		 */
		diff->line_offset += csize;
		dest += csize;
		src += csize;
		n -= csize;
		csize = vmw_find_last_diff(dest, src, n, diff->cpp);
		if (csize >= 0) {
			byte_len += csize;
			vmw_adjust_rect(diff, csize);
		}
		memcpy(dest, src, byte_len);
	}
	diff->line_offset += n;
}
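
/*
 * Note that only the span between the first and the last difference,
 * aligned to @diff->cpp granules, is actually copied; identical head
 * and tail bytes are left untouched.
 */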

/**
 * struct vmw_bo_blit_line_data - Convenience argument to vmw_bo_cpu_blit_line
 *
 * @mapped_dst: Already mapped destination page index in @dst_pages.
 * @dst_addr: Kernel virtual address of mapped destination page.
 * @dst_pages: Array of destination bo pages.
 * @dst_num_pages: Number of destination bo pages.
 * @dst_prot: Destination bo page protection.
 * @mapped_src: Already mapped source page index in @src_pages.
 * @src_addr: Kernel virtual address of mapped source page.
 * @src_pages: Array of source bo pages.
 * @src_num_pages: Number of source bo pages.
 * @src_prot: Source bo page protection.
 * @diff: Struct vmw_diff_cpy, in the end forwarded to the memcpy routine.
 */
struct vmw_bo_blit_line_data {
	u32 mapped_dst;
	u8 *dst_addr;
	struct page **dst_pages;
	u32 dst_num_pages;
	pgprot_t dst_prot;
	u32 mapped_src;
	u8 *src_addr;
	struct page **src_pages;
	u32 src_num_pages;
	pgprot_t src_prot;
	struct vmw_diff_cpy *diff;
};
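
/*
 * The @mapped_dst / @dst_addr and @mapped_src / @src_addr pairs cache
 * the currently kmapped page on each side, so that consecutive copies
 * within the same page can reuse the mapping instead of remapping.
 */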

/**
 * vmw_bo_cpu_blit_line - Blit part of a line from one bo to another.
 *
 * @d: Blit data as described above.
 * @dst_offset: Destination copy start offset from start of bo.
 * @src_offset: Source copy start offset from start of bo.
 * @bytes_to_copy: Number of bytes to copy in this line.
 */
static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
				u32 dst_offset,
				u32 src_offset,
				u32 bytes_to_copy)
{
	struct vmw_diff_cpy *diff = d->diff;

	while (bytes_to_copy) {
		u32 copy_size = bytes_to_copy;
		u32 dst_page = dst_offset >> PAGE_SHIFT;
		u32 src_page = src_offset >> PAGE_SHIFT;
		u32 dst_page_offset = dst_offset & ~PAGE_MASK;
		u32 src_page_offset = src_offset & ~PAGE_MASK;
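		/*
		 * kmap_atomic() mappings are stack-ordered: the source page
		 * is always mapped after the destination page, so it must
		 * also be unmapped whenever the destination mapping is
		 * about to be replaced.
		 */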
		bool unmap_dst = d->dst_addr && dst_page != d->mapped_dst;
		bool unmap_src = d->src_addr && (src_page != d->mapped_src ||
						 unmap_dst);

		copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset);
		copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);

		if (unmap_src) {
			kunmap_atomic(d->src_addr);
			d->src_addr = NULL;
		}

		if (unmap_dst) {
			kunmap_atomic(d->dst_addr);
			d->dst_addr = NULL;
		}

		if (!d->dst_addr) {
			if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
				return -EINVAL;

			d->dst_addr =
				kmap_atomic_prot(d->dst_pages[dst_page],
						 d->dst_prot);
			if (!d->dst_addr)
				return -ENOMEM;

			d->mapped_dst = dst_page;
		}

		if (!d->src_addr) {
			if (WARN_ON_ONCE(src_page >= d->src_num_pages))
				return -EINVAL;

			d->src_addr =
				kmap_atomic_prot(d->src_pages[src_page],
						 d->src_prot);
			if (!d->src_addr)
				return -ENOMEM;

			d->mapped_src = src_page;
		}
		diff->do_cpy(diff, d->dst_addr + dst_page_offset,
			     d->src_addr + src_page_offset, copy_size);

		bytes_to_copy -= copy_size;
		dst_offset += copy_size;
		src_offset += copy_size;
	}

	return 0;
}

/**
 * vmw_bo_cpu_blit - in-kernel cpu blit.
 *
 * @dst: Destination buffer object.
 * @dst_offset: Destination offset of blit start in bytes.
 * @dst_stride: Destination stride in bytes.
 * @src: Source buffer object.
 * @src_offset: Source offset of blit start in bytes.
 * @src_stride: Source stride in bytes.
 * @w: Width of blit.
 * @h: Height of blit.
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * Return: Zero on success. Negative error value on failure. Will print out
 * kernel warnings on caller bugs.
 *
 * Performs a CPU blit from one buffer object to another, avoiding a full
 * bo vmap, which may exhaust or fragment vmalloc space.
 * On supported architectures (x86), we're using kmap_atomic, which avoids
 * cross-processor TLB and cache flushes and may, on non-HIGHMEM systems,
 * reference already set-up mappings.
 *
 * Neither of the buffer objects may be placed in PCI memory
 * (Fixed memory in TTM terminology) when using this function.
 */
int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	u32 j, initial_line = dst_offset / dst_stride;
	struct vmw_bo_blit_line_data d;
	int ret = 0;

	/* Buffer objects need to be either pinned or reserved: */
	if (!(dst->pin_count))
		dma_resv_assert_held(dst->base.resv);
	if (!(src->pin_count))
		dma_resv_assert_held(src->base.resv);

	if (!ttm_tt_is_populated(dst->ttm)) {
		ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
		if (ret)
			return ret;
	}

	if (!ttm_tt_is_populated(src->ttm)) {
		ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx);
		if (ret)
			return ret;
	}
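
	/*
	 * The page arrays used below are only valid for populated TTMs,
	 * which is why population is forced above before any mapping of
	 * pages takes place.
	 */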

	d.mapped_dst = 0;
	d.mapped_src = 0;
	d.dst_addr = NULL;
	d.src_addr = NULL;
	d.dst_pages = dst->ttm->pages;
	d.src_pages = src->ttm->pages;
	d.dst_num_pages = PFN_UP(dst->resource->size);
	d.src_num_pages = PFN_UP(src->resource->size);
	d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
	d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
	d.diff = diff;

	for (j = 0; j < h; ++j) {
		diff->line = j + initial_line;
		diff->line_offset = dst_offset % dst_stride;
		ret = vmw_bo_cpu_blit_line(&d, dst_offset, src_offset, w);
		if (ret)
			goto out;

		dst_offset += dst_stride;
		src_offset += src_stride;
	}
out:
	if (d.src_addr)
		kunmap_atomic(d.src_addr);
	if (d.dst_addr)
		kunmap_atomic(d.dst_addr);

	return ret;
}