// SPDX-License-Identifier: GPL-2.0+

#include <drm/drm_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "dce_v6_0.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_irq.h"

/**
 * DOC: amdgpu_vkms
 *
 * The amdgpu vkms interface provides a virtual KMS interface for several use
 * cases: devices without display hardware, platforms where the actual display
 * hardware is not useful (e.g., servers), SR-IOV virtual functions, device
 * emulation/simulation, and device bring up prior to display hardware being
 * usable. We previously emulated a legacy KMS interface, but there was a desire
 * to move to the atomic KMS interface. The vkms driver did everything we
 * needed, but we wanted KMS support natively in the driver without buffer
 * sharing and the ability to support an instance of VKMS per device. We first
 * looked at splitting vkms into a stub driver and a helper module that other
 * drivers could use to implement a virtual display, but this strategy ended up
 * being messy due to driver specific callbacks needed for buffer management.
 * Ultimately, it proved easier to import the vkms code as it mostly used core
 * drm helpers anyway.
 */

static const u32 amdgpu_vkms_formats[] = {
	DRM_FORMAT_XRGB8888,
};

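/*
 * hrtimer callback that emulates the vblank interrupt: forward the timer by
 * one frame period and let the DRM vblank core handle the event.
 */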
static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
{
	struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer);
	struct drm_crtc *crtc = &amdgpu_crtc->base;
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	u64 ret_overrun;
	bool ret;

	ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer,
					  output->period_ns);
	if (ret_overrun != 1)
		DRM_WARN("%s: vblank timer overrun\n", __func__);

	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("amdgpu_vkms failure on handling vblank");

	return HRTIMER_RESTART;
}

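/*
 * Start the per-CRTC hrtimer with a period derived from the current mode's
 * frame duration; vblank "interrupts" are then raised from the timer callback.
 */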
static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	out->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL);

	return 0;
}

static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	hrtimer_cancel(&amdgpu_crtc->vblank_timer);
}

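/*
 * Report the simulated vblank timestamp. Because the hrtimer is forwarded
 * before the vblank is handled, its expiry points at the *next* vblank, so
 * the reported value is corrected back by one frame period.
 */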
static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
					     int *max_error,
					     ktime_t *vblank_time,
					     bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;

	return true;
}

static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = drm_crtc_cleanup,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank = amdgpu_vkms_enable_vblank,
	.disable_vblank = amdgpu_vkms_disable_vblank,
	.get_vblank_timestamp = amdgpu_vkms_get_vblank_timestamp,
};

static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}

static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
}

static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_atomic_state *state)
{
	unsigned long flags;

	if (crtc->state->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		crtc->state->event = NULL;
	}
}

static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
	.atomic_flush = amdgpu_vkms_crtc_atomic_flush,
	.atomic_enable = amdgpu_vkms_crtc_atomic_enable,
	.atomic_disable = amdgpu_vkms_crtc_atomic_disable,
};

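/* Initialize a virtual CRTC and the hrtimer used to simulate its vblank. */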
static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
				 struct drm_plane *primary, struct drm_plane *cursor)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int ret;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					&amdgpu_vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}

	drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);

	amdgpu_crtc->crtc_id = drm_crtc_index(crtc);
	adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc;

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;

	hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;

	return ret;
}

static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

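/*
 * Populate the virtual connector with a fixed list of common modes generated
 * via CVT at 60 Hz, and mark the default resolution as preferred.
 */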
static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode = NULL;
	unsigned i;
	static const struct mode_size {
		int w;
		int h;
	} common_modes[] = {
		{ 640, 480},
		{ 720, 480},
		{ 800, 600},
		{ 848, 480},
		{1024, 768},
		{1152, 768},
		{1280, 720},
		{1280, 800},
		{1280, 854},
		{1280, 960},
		{1280, 1024},
		{1440, 900},
		{1400, 1050},
		{1680, 1050},
		{1600, 1200},
		{1920, 1080},
		{1920, 1200},
		{2560, 1440},
		{4096, 3112},
		{3656, 2664},
		{3840, 2160},
		{4096, 2160},
	};

	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
		drm_mode_probed_add(connector, mode);
	}

	drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);

	return ARRAY_SIZE(common_modes);
}

static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = {
	.get_modes = amdgpu_vkms_conn_get_modes,
};

static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

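/*
 * Plane updates are a no-op for the virtual display; atomic_check only
 * verifies that the primary plane is unscaled and fully visible.
 */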
static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane,
					    struct drm_atomic_state *old_state)
{
	return;
}

static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret != 0)
		return ret;

	/* for now primary plane must be visible and full screen */
	if (!new_plane_state->visible)
		return -EINVAL;

	return 0;
}

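/*
 * Pin the new framebuffer's BO in a scanout-capable domain and record its GPU
 * address so the buffer stays resident while bound; the pin is dropped again
 * in cleanup_fb.
 */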
static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
				  struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}
	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	return 0;
}

static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = {
	.atomic_update = amdgpu_vkms_plane_atomic_update,
	.atomic_check = amdgpu_vkms_plane_atomic_check,
	.prepare_fb = amdgpu_vkms_prepare_fb,
	.cleanup_fb = amdgpu_vkms_cleanup_fb,
};

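/* Allocate and initialize a plane that exposes only linear XRGB8888. */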
static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
						enum drm_plane_type type,
						int index)
{
	struct drm_plane *plane;
	uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID};
	int ret;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &amdgpu_vkms_plane_funcs,
				       amdgpu_vkms_formats,
				       ARRAY_SIZE(amdgpu_vkms_formats),
				       modifiers, type, NULL);
	if (ret) {
		kfree(plane);
		return ERR_PTR(ret);
	}

	drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs);

	return plane;
}

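/*
 * Create one virtual output: a primary plane, a CRTC, and a virtual
 * connector/encoder pair, unwinding everything on failure.
 */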
static int amdgpu_vkms_output_init(struct drm_device *dev,
				   struct amdgpu_vkms_output *output, int index)
{
	struct drm_connector *connector = &output->connector;
	struct drm_encoder *encoder = &output->encoder;
	struct drm_crtc *crtc = &output->crtc.base;
	struct drm_plane *primary, *cursor = NULL;
	int ret;

	primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor);
	if (ret)
		goto err_crtc;

	ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init connector\n");
		goto err_connector;
	}

	drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs);

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init encoder\n");
		goto err_encoder;
	}
	encoder->possible_crtcs = 1 << index;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("Failed to attach connector to encoder\n");
		goto err_attach;
	}

	drm_mode_config_reset(dev);

	return 0;

err_attach:
	drm_encoder_cleanup(encoder);

err_encoder:
	drm_connector_cleanup(connector);

err_connector:
	drm_crtc_cleanup(crtc);

err_crtc:
	drm_plane_cleanup(primary);

	return ret;
}

const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

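/*
 * IP-block sw_init: set up the atomic mode config and create one virtual
 * output (plane/CRTC/encoder/connector) per CRTC exposed by the device.
 */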
static int amdgpu_vkms_sw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
					   sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
	if (!adev->amdgpu_vkms_output)
		return -ENOMEM;

	adev_to_drm(adev)->max_vblank_count = 0;

	adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;

	adev_to_drm(adev)->mode_config.max_width = XRES_MAX;
	adev_to_drm(adev)->mode_config.max_height = YRES_MAX;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	/* allocate crtcs, encoders, connectors */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
		if (r)
			return r;
	}

	drm_kms_helper_poll_init(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = true;
	return 0;
}

static int amdgpu_vkms_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i = 0;

	for (i = 0; i < adev->mode_info.num_crtc; i++)
		if (adev->mode_info.crtcs[i])
			hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);

	drm_kms_helper_poll_fini(adev_to_drm(adev));
	drm_mode_config_cleanup(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = false;

	kfree(adev->mode_info.bios_hardcoded_edid);
	kfree(adev->amdgpu_vkms_output);
	return 0;
}

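/*
 * Make sure any real display hardware is quiesced before the virtual KMS
 * takes over, by disabling the DCE block on ASICs that have one.
 */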
static int amdgpu_vkms_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		dce_v6_0_disable_dce(adev);
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		dce_v8_0_disable_dce(adev);
		break;
#endif
	case CHIP_FIJI:
	case CHIP_TONGA:
		dce_v10_0_disable_dce(adev);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_VEGAM:
		dce_v11_0_disable_dce(adev);
		break;
	case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
		/* no DCE */
		break;
	default:
		break;
	}
	return 0;
}

static int amdgpu_vkms_hw_fini(void *handle)
{
	return 0;
}

static int amdgpu_vkms_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = drm_mode_config_helper_suspend(adev_to_drm(adev));
	if (r)
		return r;
	return amdgpu_vkms_hw_fini(handle);
}

static int amdgpu_vkms_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vkms_hw_init(handle);
	if (r)
		return r;
	return drm_mode_config_helper_resume(adev_to_drm(adev));
}

static bool amdgpu_vkms_is_idle(void *handle)
{
	return true;
}

static int amdgpu_vkms_wait_for_idle(void *handle)
{
	return 0;
}

static int amdgpu_vkms_soft_reset(void *handle)
{
	return 0;
}

static int amdgpu_vkms_set_clockgating_state(void *handle,
					     enum amd_clockgating_state state)
{
	return 0;
}

static int amdgpu_vkms_set_powergating_state(void *handle,
					     enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
	.name = "amdgpu_vkms",
	.early_init = NULL,
	.late_init = NULL,
	.sw_init = amdgpu_vkms_sw_init,
	.sw_fini = amdgpu_vkms_sw_fini,
	.hw_init = amdgpu_vkms_hw_init,
	.hw_fini = amdgpu_vkms_hw_fini,
	.suspend = amdgpu_vkms_suspend,
	.resume = amdgpu_vkms_resume,
	.is_idle = amdgpu_vkms_is_idle,
	.wait_for_idle = amdgpu_vkms_wait_for_idle,
	.soft_reset = amdgpu_vkms_soft_reset,
	.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
	.set_powergating_state = amdgpu_vkms_set_powergating_state,
};

const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_vkms_ip_funcs,
};