/**************************************************************************
 *
 * Copyright © 2017 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
/*
 * Template that implements find_first_diff() for a generic
 * unsigned integer type. @size and return value are in bytes.
 */
#define VMW_FIND_FIRST_DIFF(_type)                                      \
static size_t vmw_find_first_diff_ ## _type                             \
        (const _type * dst, const _type * src, size_t size)             \
{                                                                       \
        size_t i;                                                       \
                                                                        \
        for (i = 0; i < size; i += sizeof(_type)) {                     \
                if (*dst++ != *src++)                                   \
                        break;                                          \
        }                                                               \
                                                                        \
        return i;                                                       \
}
/*
 * Template that implements find_last_diff() for a generic
 * unsigned integer type. Pointers point to the item following the
 * *end* of the area to be examined. @size and return value are in
 * bytes. The return value is zero if no difference was found.
 */
#define VMW_FIND_LAST_DIFF(_type)                                       \
static ssize_t vmw_find_last_diff_ ## _type(                            \
        const _type * dst, const _type * src, size_t size)              \
{                                                                       \
        while (size) {                                                  \
                if (*--dst != *--src)                                   \
                        break;                                          \
                                                                        \
                size -= sizeof(_type);                                  \
        }                                                               \
        return (ssize_t) size;                                          \
}
/*
 * Instantiate find diff functions for relevant unsigned integer sizes,
 * assuming that wider integers are faster (including aligning) up to the
 * architecture native width, which is assumed to be 32 bit unless
 * CONFIG_64BIT is defined.
 */
VMW_FIND_FIRST_DIFF(u8);
VMW_FIND_LAST_DIFF(u8);

VMW_FIND_FIRST_DIFF(u16);
VMW_FIND_LAST_DIFF(u16);

VMW_FIND_FIRST_DIFF(u32);
VMW_FIND_LAST_DIFF(u32);

#ifdef CONFIG_64BIT
VMW_FIND_FIRST_DIFF(u64);
VMW_FIND_LAST_DIFF(u64);
#endif
/* We use size aligned copies. This computes (addr - align(addr)) */
#define SPILL(_var, _type) ((unsigned long) _var & (sizeof(_type) - 1))
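
/*
 * For illustration only (the numbers are hypothetical): SPILL() yields
 * how far an address overshoots the previous sizeof(_type) boundary,
 * e.g.
 *
 *      SPILL(0x1003, u32) == 3 and SPILL(0x1000, u32) == 0,
 *
 * so sizeof(_type) - SPILL(addr, _type) byte-wise steps realign addr
 * for wider accesses.
 */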
/*
 * Template to compute find_first_diff() for a certain integer type
 * including a head find for alignment, and adjustment of parameters
 * for tail find or increased resolution find using an unsigned integer find
 * of smaller width. If finding is complete, and resolution is sufficient,
 * the macro executes a return statement. Otherwise it falls through.
 */
#define VMW_TRY_FIND_FIRST_DIFF(_type)                                  \
do {                                                                    \
        unsigned int spill = SPILL(dst, _type);                         \
        size_t diff_offs;                                               \
                                                                        \
        if (spill && spill == SPILL(src, _type) &&                      \
            sizeof(_type) - spill <= size) {                            \
                spill = sizeof(_type) - spill;                          \
                diff_offs = vmw_find_first_diff_u8(dst, src, spill);    \
                if (diff_offs < spill)                                  \
                        return round_down(offset + diff_offs, granularity); \
                                                                        \
                dst += spill;                                           \
                src += spill;                                           \
                size -= spill;                                          \
                offset += spill;                                        \
                spill = 0;                                              \
        }                                                               \
        if (!spill && !SPILL(src, _type)) {                             \
                size_t to_copy = size & ~(sizeof(_type) - 1);           \
                                                                        \
                diff_offs = vmw_find_first_diff_ ## _type               \
                        ((_type *) dst, (_type *) src, to_copy);        \
                if (diff_offs >= size || granularity == sizeof(_type))  \
                        return (offset + diff_offs);                    \
                                                                        \
                dst += diff_offs;                                       \
                src += diff_offs;                                       \
                size -= diff_offs;                                      \
                offset += diff_offs;                                    \
        }                                                               \
} while (0)
/**
 * vmw_find_first_diff - find the first difference between dst and src
 *
 * @dst: The destination address
 * @src: The source address
 * @size: Number of bytes to compare
 * @granularity: The granularity needed for the return value in bytes.
 * return: The offset from find start where the first difference was
 * encountered in bytes. If no difference was found, the function returns
 * a value >= @size.
 */
static size_t vmw_find_first_diff(const u8 *dst, const u8 *src, size_t size,
                                  size_t granularity)
{
        size_t offset = 0;

        /*
         * Try finding with large integers if alignment allows, or we can
         * fix it. Fall through if we need better resolution or alignment
         * was bad.
         */
#ifdef CONFIG_64BIT
        VMW_TRY_FIND_FIRST_DIFF(u64);
#endif
        VMW_TRY_FIND_FIRST_DIFF(u32);
        VMW_TRY_FIND_FIRST_DIFF(u16);

        return round_down(offset + vmw_find_first_diff_u8(dst, src, size),
                          granularity);
}
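
/*
 * Illustrative example (not part of the driver): with 4-byte pixels
 * (granularity == 4), a first differing byte at offset 6 makes
 * vmw_find_first_diff() return round_down(6, 4) == 4, so the copy and
 * the bounding box start on a pixel boundary.
 */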
/*
 * Template to compute find_last_diff() for a certain integer type
 * including a tail find for alignment, and adjustment of parameters
 * for head find or increased resolution find using an unsigned integer find
 * of smaller width. If finding is complete, and resolution is sufficient,
 * the macro executes a return statement. Otherwise it falls through.
 */
#define VMW_TRY_FIND_LAST_DIFF(_type)                                   \
do {                                                                    \
        unsigned int spill = SPILL(dst, _type);                         \
        ssize_t location;                                               \
        ssize_t diff_offs;                                              \
                                                                        \
        if (spill && spill <= size && spill == SPILL(src, _type)) {     \
                diff_offs = vmw_find_last_diff_u8(dst, src, spill);     \
                if (diff_offs > 0) {                                    \
                        location = size - spill + diff_offs - 1;        \
                        return round_down(location, granularity);       \
                }                                                       \
                                                                        \
                dst -= spill;                                           \
                src -= spill;                                           \
                size -= spill;                                          \
                spill = 0;                                              \
        }                                                               \
        if (!spill && !SPILL(src, _type)) {                             \
                size_t to_copy = round_down(size, sizeof(_type));       \
                                                                        \
                diff_offs = vmw_find_last_diff_ ## _type                \
                        ((_type *) dst, (_type *) src, to_copy);        \
                location = size - to_copy + diff_offs - sizeof(_type);  \
                if (location < 0 || granularity == sizeof(_type))      \
                        return location;                                \
                                                                        \
                dst -= to_copy - diff_offs;                             \
                src -= to_copy - diff_offs;                             \
                size -= to_copy - diff_offs;                            \
        }                                                               \
} while (0)
/**
 * vmw_find_last_diff - find the last difference between dst and src
 *
 * @dst: The destination address
 * @src: The source address
 * @size: Number of bytes to compare
 * @granularity: The granularity needed for the return value in bytes.
 * return: The offset from find start where the last difference was
 * encountered in bytes, or a negative value if no difference was found.
 */
static ssize_t vmw_find_last_diff(const u8 *dst, const u8 *src, size_t size,
                                  size_t granularity)
{
        dst += size;
        src += size;

#ifdef CONFIG_64BIT
        VMW_TRY_FIND_LAST_DIFF(u64);
#endif
        VMW_TRY_FIND_LAST_DIFF(u32);
        VMW_TRY_FIND_LAST_DIFF(u16);

        return round_down(vmw_find_last_diff_u8(dst, src, size) - 1,
                          granularity);
}
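
/*
 * Illustrative example (not part of the driver): with granularity == 4
 * and the last differing byte at offset 9, vmw_find_last_diff() returns
 * round_down(9, 4) == 8, the start of the last modified pixel. A
 * negative return value means the compared regions were identical.
 */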
/**
 * vmw_memcpy - A wrapper around kernel memcpy that allows plugging it into a
 * struct vmw_diff_cpy.
 *
 * @diff: The struct vmw_diff_cpy closure argument (unused).
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 */
void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n)
{
        memcpy(dest, src, n);
}
/**
 * vmw_adjust_rect - Adjust rectangle coordinates for newly found difference
 *
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * @diff_offs: The offset from @diff->line_offset where the difference was
 * found.
 */
static void vmw_adjust_rect(struct vmw_diff_cpy *diff, size_t diff_offs)
{
        size_t offs = (diff_offs + diff->line_offset) / diff->cpp;
        struct drm_rect *rect = &diff->rect;

        rect->x1 = min_t(int, rect->x1, offs);
        rect->x2 = max_t(int, rect->x2, offs + 1);
        rect->y1 = min_t(int, rect->y1, diff->line);
        rect->y2 = max_t(int, rect->y2, diff->line + 1);
}
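
/*
 * For example (illustrative values): with cpp == 4, line == 10 and
 * line_offset == 0, a difference at byte offset 6 lands in pixel
 * offs == 1, growing the rect to cover at least x1 <= 1 < 2 <= x2 and
 * y1 <= 10 < 11 <= y2.
 */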
/**
 * vmw_diff_memcpy - memcpy that creates a bounding box of modified content.
 *
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 *
 * In order to correctly track the modified content, the field @diff->line must
 * be pre-loaded with the current line number, the field @diff->line_offset must
 * be pre-loaded with the line offset in bytes where the copy starts, and
 * finally the field @diff->cpp needs to be pre-loaded with the number of bytes
 * per unit in the horizontal direction of the area we're examining.
 * Typically bytes per pixel.
 * This determines the granularity of the difference computing operations.
 * A higher cpp generally leads to faster execution at the cost of bounding box
 * width precision.
 */
void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
                     size_t n)
{
        ssize_t csize, byte_len;

        if (WARN_ON_ONCE(round_down(n, diff->cpp) != n))
                return;

        /* TODO: Possibly use a single vmw_find_first_diff per line? */
        csize = vmw_find_first_diff(dest, src, n, diff->cpp);
        if (csize < n) {
                vmw_adjust_rect(diff, csize);
                byte_len = diff->cpp;

                /*
                 * Starting from where first difference was found, find
                 * location of last difference, and then copy.
                 */
                diff->line_offset += csize;
                dest += csize;
                src += csize;
                n -= csize;
                csize = vmw_find_last_diff(dest, src, n, diff->cpp);
                if (csize >= 0) {
                        byte_len += csize;
                        vmw_adjust_rect(diff, csize);
                }
                memcpy(dest, src, byte_len);
        }
        diff->line_offset += n;
}
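
/*
 * Typical use (an illustrative sketch; the local names are made up):
 * @cpp is normally set once when the struct vmw_diff_cpy is
 * initialized, while @line and @line_offset are re-seeded before each
 * copied span:
 *
 *      diff->line = cur_line;
 *      diff->line_offset = span_start;
 *      vmw_diff_memcpy(diff, dest, src, span_len);
 *
 * Afterwards diff->rect bounds all pixels that actually changed.
 * vmw_bo_cpu_blit() below performs exactly this seeding per blitted
 * line.
 */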
/**
 * struct vmw_bo_blit_line_data - Convenience argument to vmw_bo_cpu_blit_line
 *
 * @mapped_dst: Already mapped destination page index in @dst_pages.
 * @dst_addr: Kernel virtual address of mapped destination page.
 * @dst_pages: Array of destination bo pages.
 * @dst_num_pages: Number of destination bo pages.
 * @dst_prot: Destination bo page protection.
 * @mapped_src: Already mapped source page index in @src_pages.
 * @src_addr: Kernel virtual address of mapped source page.
 * @src_pages: Array of source bo pages.
 * @src_num_pages: Number of source bo pages.
 * @src_prot: Source bo page protection.
 * @diff: Struct vmw_diff_cpy, in the end forwarded to the memcpy routine.
 */
struct vmw_bo_blit_line_data {
        u32 mapped_dst;
        u8 *dst_addr;
        struct page **dst_pages;
        u32 dst_num_pages;
        pgprot_t dst_prot;
        u32 mapped_src;
        u8 *src_addr;
        struct page **src_pages;
        u32 src_num_pages;
        pgprot_t src_prot;
        struct vmw_diff_cpy *diff;
};
/**
 * vmw_bo_cpu_blit_line - Blit part of a line from one bo to another.
 *
 * @d: Blit data as described above.
 * @dst_offset: Destination copy start offset from start of bo.
 * @src_offset: Source copy start offset from start of bo.
 * @bytes_to_copy: Number of bytes to copy in this line.
 */
static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
                                u32 dst_offset,
                                u32 src_offset,
                                u32 bytes_to_copy)
{
        struct vmw_diff_cpy *diff = d->diff;

        while (bytes_to_copy) {
                u32 copy_size = bytes_to_copy;
                u32 dst_page = dst_offset >> PAGE_SHIFT;
                u32 src_page = src_offset >> PAGE_SHIFT;
                u32 dst_page_offset = dst_offset & ~PAGE_MASK;
                u32 src_page_offset = src_offset & ~PAGE_MASK;
                bool unmap_dst = d->dst_addr && dst_page != d->mapped_dst;
                bool unmap_src = d->src_addr && (src_page != d->mapped_src ||
                                                 unmap_dst);

                copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset);
                copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);

                if (unmap_src) {
                        ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
                        d->src_addr = NULL;
                }
                if (unmap_dst) {
                        ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
                        d->dst_addr = NULL;
                }
                if (!d->dst_addr) {
                        if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
                                return -EINVAL;

                        d->dst_addr =
                                ttm_kmap_atomic_prot(d->dst_pages[dst_page],
                                                     d->dst_prot);
                        if (!d->dst_addr)
                                return -ENOMEM;

                        d->mapped_dst = dst_page;
                }
                if (!d->src_addr) {
                        if (WARN_ON_ONCE(src_page >= d->src_num_pages))
                                return -EINVAL;

                        d->src_addr =
                                ttm_kmap_atomic_prot(d->src_pages[src_page],
                                                     d->src_prot);
                        if (!d->src_addr)
                                return -ENOMEM;

                        d->mapped_src = src_page;
                }
                diff->do_cpy(diff, d->dst_addr + dst_page_offset,
                             d->src_addr + src_page_offset, copy_size);

                bytes_to_copy -= copy_size;
                dst_offset += copy_size;
                src_offset += copy_size;
        }

        return 0;
}
/**
 * vmw_bo_cpu_blit - in-kernel cpu blit.
 *
 * @dst: Destination buffer object.
 * @dst_offset: Destination offset of blit start in bytes.
 * @dst_stride: Destination stride in bytes.
 * @src: Source buffer object.
 * @src_offset: Source offset of blit start in bytes.
 * @src_stride: Source stride in bytes.
 * @w: Width of blit in bytes.
 * @h: Height of blit.
 * return: Zero on success. Negative error value on failure. Will print out
 * kernel warnings on caller bugs.
 *
 * Performs a CPU blit from one buffer object to another avoiding a full
 * bo vmap which may exhaust or fragment vmalloc space.
 * On supported architectures (x86), we're using kmap_atomic which avoids
 * cross-processor TLB and cache flushes and may, on non-HIGHMEM systems,
 * reference already set-up mappings.
 *
 * Neither of the buffer objects may be placed in PCI memory
 * (Fixed memory in TTM terminology) when using this function.
 */
int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
                    u32 dst_offset, u32 dst_stride,
                    struct ttm_buffer_object *src,
                    u32 src_offset, u32 src_stride,
                    u32 w, u32 h,
                    struct vmw_diff_cpy *diff)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        u32 j, initial_line = dst_offset / dst_stride;
        struct vmw_bo_blit_line_data d;
        int ret = 0;

        /* Buffer objects need to be either pinned or reserved: */
        if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
                lockdep_assert_held(&dst->resv->lock.base);
        if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
                lockdep_assert_held(&src->resv->lock.base);

        if (dst->ttm->state == tt_unpopulated) {
                ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
                if (ret)
                        return ret;
        }

        if (src->ttm->state == tt_unpopulated) {
                ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx);
                if (ret)
                        return ret;
        }

        d.mapped_dst = 0;
        d.mapped_src = 0;
        d.dst_addr = NULL;
        d.src_addr = NULL;
        d.dst_pages = dst->ttm->pages;
        d.src_pages = src->ttm->pages;
        d.dst_num_pages = dst->num_pages;
        d.src_num_pages = src->num_pages;
        d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
        d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
        d.diff = diff;

        for (j = 0; j < h; ++j) {
                diff->line = j + initial_line;
                diff->line_offset = dst_offset % dst_stride;
                ret = vmw_bo_cpu_blit_line(&d, dst_offset, src_offset, w);
                if (ret)
                        goto out;

                dst_offset += dst_stride;
                src_offset += src_stride;
        }
out:
        if (d.src_addr)
                ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
        if (d.dst_addr)
                ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);

        return ret;
}
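
/*
 * Example usage (an illustrative sketch only; dst_bo, src_bo, the
 * geometry and flush_damage() are hypothetical). A caller wanting both
 * the copy and the modified bounding box could do:
 *
 *      struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *      int ret;
 *
 *      ret = vmw_bo_cpu_blit(dst_bo, 0, dst_stride,
 *                            src_bo, 0, src_stride,
 *                            width * 4, height, &diff);
 *      if (!ret && drm_rect_visible(&diff.rect))
 *              flush_damage(&diff.rect);
 *
 * VMW_CPU_BLIT_DIFF_INITIALIZER() (declared in vmwgfx_drv.h) plugs
 * vmw_diff_memcpy() in as the copy routine, while VMW_CPU_BLIT_INITIALIZER
 * selects plain vmw_memcpy() when no diff tracking is needed.
 */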