/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

dc5698e8 | 26 | #include <drm/drm_atomic_helper.h> |
c0967617 | 27 | #include <drm/drm_damage_helper.h> |
a3d63977 SR |
28 | #include <drm/drm_fourcc.h> |
29 | #include <drm/drm_plane_helper.h> | |
30 | ||
31 | #include "virtgpu_drv.h" | |
dc5698e8 DA |
32 | |
/* Formats advertised for the primary plane.  DRM_FORMAT_HOST_* picks the
 * fourcc matching the host byte order — presumably defined in the driver
 * headers; see virtio_gpu_translate_format() for the host-side mapping. */
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

/* Formats advertised for the cursor plane (alpha needed for cursors). */
static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};
d519cb76 GH |
41 | uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc) |
42 | { | |
43 | uint32_t format; | |
44 | ||
45 | switch (drm_fourcc) { | |
d519cb76 GH |
46 | case DRM_FORMAT_XRGB8888: |
47 | format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; | |
48 | break; | |
49 | case DRM_FORMAT_ARGB8888: | |
50 | format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM; | |
51 | break; | |
52 | case DRM_FORMAT_BGRX8888: | |
53 | format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM; | |
54 | break; | |
55 | case DRM_FORMAT_BGRA8888: | |
56 | format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM; | |
57 | break; | |
d519cb76 GH |
58 | default: |
59 | /* | |
60 | * This should not happen, we handle everything listed | |
61 | * in virtio_gpu_formats[]. | |
62 | */ | |
63 | format = 0; | |
64 | break; | |
65 | } | |
66 | WARN_ON(format == 0); | |
67 | return format; | |
68 | } | |
69 | ||
/* Undo drm_universal_plane_init(), then release the allocation made in
 * virtio_gpu_plane_init(). */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}
75 | ||
/* Plane vfuncs shared by primary and cursor planes: everything is
 * delegated to the atomic helpers except destroy, which must also free
 * the kzalloc'd struct drm_plane. */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};
84 | ||
85 | static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, | |
86 | struct drm_plane_state *state) | |
87 | { | |
a02c4c25 GH |
88 | bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR; |
89 | struct drm_crtc_state *crtc_state; | |
90 | int ret; | |
91 | ||
4ad7056a | 92 | if (!state->fb || WARN_ON(!state->crtc)) |
a02c4c25 GH |
93 | return 0; |
94 | ||
95 | crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); | |
96 | if (IS_ERR(crtc_state)) | |
97 | return PTR_ERR(crtc_state); | |
98 | ||
99 | ret = drm_atomic_helper_check_plane_state(state, crtc_state, | |
100 | DRM_PLANE_HELPER_NO_SCALING, | |
101 | DRM_PLANE_HELPER_NO_SCALING, | |
102 | is_cursor, true); | |
103 | return ret; | |
dc5698e8 DA |
104 | } |
105 | ||
544c521d | 106 | static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev, |
c0967617 GH |
107 | struct drm_plane_state *state, |
108 | struct drm_rect *rect) | |
544c521d | 109 | { |
c0967617 GH |
110 | struct virtio_gpu_object *bo = |
111 | gem_to_virtio_gpu_obj(state->fb->obj[0]); | |
544c521d | 112 | struct virtio_gpu_object_array *objs; |
c0967617 GH |
113 | uint32_t w = rect->x2 - rect->x1; |
114 | uint32_t h = rect->y2 - rect->y1; | |
115 | uint32_t x = rect->x1; | |
116 | uint32_t y = rect->y1; | |
117 | uint32_t off = x * state->fb->format->cpp[0] + | |
118 | y * state->fb->pitches[0]; | |
544c521d GH |
119 | |
120 | objs = virtio_gpu_array_alloc(1); | |
121 | if (!objs) | |
122 | return; | |
123 | virtio_gpu_array_add_obj(objs, &bo->base.base); | |
c0967617 GH |
124 | |
125 | virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y, | |
126 | objs, NULL); | |
544c521d GH |
127 | } |
128 | ||
/*
 * Atomic update for the primary plane: upload damaged pixels of dumb
 * BOs, (re)program the scanout when the fb or source rectangle changed,
 * and flush the damaged region on the host.
 */
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	/* Pick the output; old_state's crtc deliberately wins when both
	 * are set (plane moving between outputs). */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	/* No fb or output disabled: point the scanout at resource 0,
	 * which disables it on the host. */
	if (!plane->state->fb || !output->enabled) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		return;
	}

	/* Merge damage clips into one rect; bail if nothing changed. */
	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	/* Dumb BOs need an explicit transfer of the damaged pixels. */
	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	/* Reprogram the scanout only when the fb or source rect changed;
	 * a pure damage update just needs the flush below. */
	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y) {
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);
		virtio_gpu_cmd_set_scanout(vgdev, output->index,
					   bo->hw_res_handle,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   plane->state->src_x >> 16,
					   plane->state->src_y >> 16);
	}

	/* Tell the host to present the damaged region. */
	virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
				      rect.x1,
				      rect.y1,
				      rect.x2 - rect.x1,
				      rect.y2 - rect.y1);
}
188 | ||
9fdd90c0 RF |
189 | static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane, |
190 | struct drm_plane_state *new_state) | |
191 | { | |
192 | struct drm_device *dev = plane->dev; | |
193 | struct virtio_gpu_device *vgdev = dev->dev_private; | |
194 | struct virtio_gpu_framebuffer *vgfb; | |
195 | struct virtio_gpu_object *bo; | |
196 | ||
197 | if (!new_state->fb) | |
198 | return 0; | |
199 | ||
200 | vgfb = to_virtio_gpu_framebuffer(new_state->fb); | |
201 | bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); | |
202 | if (bo && bo->dumb && (plane->state->fb != new_state->fb)) { | |
203 | vgfb->fence = virtio_gpu_fence_alloc(vgdev); | |
204 | if (!vgfb->fence) | |
205 | return -ENOMEM; | |
206 | } | |
207 | ||
208 | return 0; | |
209 | } | |
210 | ||
211 | static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane, | |
212 | struct drm_plane_state *old_state) | |
213 | { | |
214 | struct virtio_gpu_framebuffer *vgfb; | |
215 | ||
216 | if (!plane->state->fb) | |
217 | return; | |
218 | ||
219 | vgfb = to_virtio_gpu_framebuffer(plane->state->fb); | |
cb66c6da GH |
220 | if (vgfb->fence) { |
221 | dma_fence_put(&vgfb->fence->f); | |
222 | vgfb->fence = NULL; | |
223 | } | |
9fdd90c0 RF |
224 | } |
225 | ||
/*
 * Atomic update for the cursor plane: upload a new cursor image to the
 * host when the fb changed (waiting for the transfer so the host never
 * samples a half-written image), then send an UPDATE_CURSOR or
 * MOVE_CURSOR command through the cursor virtqueue.
 */
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	/* Pick the output; old_state's crtc wins when both are set. */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	/* Resource handle 0 hides the cursor on the host. */
	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		/* NOTE(review): this condition mirrors the one in
		 * virtio_gpu_cursor_prepare_fb(), so vgfb->fence is
		 * assumed non-NULL here — confirm the two stay in sync. */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		/* Block until the image transfer completed, then drop
		 * our fence reference. */
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		/* New image (or hide): full UPDATE_CURSOR with hotspot. */
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		/* Same image: a cheaper MOVE_CURSOR is sufficient. */
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}
299 | ||
/* Primary plane helpers: no prepare_fb/cleanup_fb needed, primary
 * updates do not allocate a per-update fence. */
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

/* Cursor plane helpers: prepare_fb/cleanup_fb manage the fence used to
 * wait for cursor image uploads. */
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};
311 | ||
312 | struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, | |
bbbed888 | 313 | enum drm_plane_type type, |
dc5698e8 DA |
314 | int index) |
315 | { | |
316 | struct drm_device *dev = vgdev->ddev; | |
bbbed888 | 317 | const struct drm_plane_helper_funcs *funcs; |
dc5698e8 | 318 | struct drm_plane *plane; |
bbbed888 GH |
319 | const uint32_t *formats; |
320 | int ret, nformats; | |
dc5698e8 DA |
321 | |
322 | plane = kzalloc(sizeof(*plane), GFP_KERNEL); | |
323 | if (!plane) | |
324 | return ERR_PTR(-ENOMEM); | |
325 | ||
bbbed888 GH |
326 | if (type == DRM_PLANE_TYPE_CURSOR) { |
327 | formats = virtio_gpu_cursor_formats; | |
328 | nformats = ARRAY_SIZE(virtio_gpu_cursor_formats); | |
329 | funcs = &virtio_gpu_cursor_helper_funcs; | |
330 | } else { | |
331 | formats = virtio_gpu_formats; | |
332 | nformats = ARRAY_SIZE(virtio_gpu_formats); | |
333 | funcs = &virtio_gpu_primary_helper_funcs; | |
334 | } | |
dc5698e8 DA |
335 | ret = drm_universal_plane_init(dev, plane, 1 << index, |
336 | &virtio_gpu_plane_funcs, | |
bbbed888 | 337 | formats, nformats, |
e6fc3b68 | 338 | NULL, type, NULL); |
dc5698e8 DA |
339 | if (ret) |
340 | goto err_plane_init; | |
341 | ||
bbbed888 | 342 | drm_plane_helper_add(plane, funcs); |
dc5698e8 DA |
343 | return plane; |
344 | ||
345 | err_plane_init: | |
346 | kfree(plane); | |
347 | return ERR_PTR(ret); | |
348 | } |