drm/amd/display: Match actual state during S3 resume.
author     Leo (Sunpeng) Li <sunpeng.li@amd.com>
Thu, 7 Sep 2017 21:05:38 +0000 (17:05 -0400)
committer  Alex Deucher <alexander.deucher@amd.com>
Sat, 21 Oct 2017 20:40:54 +0000 (16:40 -0400)
During system suspend, we:

1. Cache a duplicate of the current DRM atomic state, which calls hooks
   to duplicate our internal states.
2. Call hooks to disable all functionality.
3. The system suspends.
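
For reference, a minimal sketch of the suspend side that produces the
stale cache (not part of this patch; it assumes the amdgpu_dm types and
helpers of this era, e.g. struct amdgpu_display_manager,
drm_atomic_helper_suspend() and dc_set_power_state(), so treat any
detail not visible in the diff below as an assumption):

/* Reference-only sketch: roughly what the DM suspend hook does. */
static int dm_suspend_sketch(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;

        /*
         * Steps 1 and 2: drm_atomic_helper_suspend() duplicates the
         * current atomic state (invoking our duplicate hooks, which keep
         * references to the live DC streams/planes) and then disables
         * all outputs. The cached copy therefore still describes the
         * enabled, pre-suspend configuration.
         */
        dm->cached_state = drm_atomic_helper_suspend(adev->ddev);

        /* Step 3: power DC down before the system suspends. */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

        return 0;
}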

During resume, we attempt to restore the cached state. However, our
internal states are now stale, since step 1 was done before step 2,
i.e. the cached state does not reflect that everything has been disabled.

This change resolves that by releasing the stale DC objects held by the
cached state and refreshing it against the actual (disabled) state
before it is restored during resume.

Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

index 75212f74f027b62e2271088a2717bd57e38ef0b7..ecca2862407f04c03e86f899320d7e6ebb5d7bb5 100644
@@ -648,6 +648,11 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
        struct drm_connector *connector;
        struct drm_crtc *crtc;
        struct drm_crtc_state *new_crtc_state;
+       struct dm_crtc_state *dm_crtc_state;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
+       struct dm_plane_state *dm_plane_state;
+       struct dm_atomic_state *cached_state;
        int ret = 0;
        int i;
 
@@ -686,6 +691,37 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
        for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
                new_crtc_state->active_changed = true;
 
+       cached_state = to_dm_atomic_state(adev->dm.cached_state);
+
+       /*
+        * During suspend, the cached state is saved before all streams are
+        * disabled. Refresh cached state to match actual current state before
+        * restoring it.
+        */
+       WARN_ON(kref_read(&cached_state->context->refcount) > 1);
+       dc_release_state(cached_state->context);
+
+       cached_state->context = dc_create_state();
+       dc_resource_state_copy_construct_current(adev->dm.dc, cached_state->context);
+
+       for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
+               dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+               if (dm_crtc_state->stream) {
+                       WARN_ON(kref_read(&dm_crtc_state->stream->refcount) > 1);
+                       dc_stream_release(dm_crtc_state->stream);
+                       dm_crtc_state->stream = NULL;
+               }
+       }
+
+       for_each_new_plane_in_state(adev->dm.cached_state, plane, plane_state, i) {
+               dm_plane_state = to_dm_plane_state(plane_state);
+               if (dm_plane_state->dc_state) {
+                       WARN_ON(kref_read(&dm_plane_state->dc_state->refcount) > 1);
+                       dc_plane_state_release(dm_plane_state->dc_state);
+                       dm_plane_state->dc_state = NULL;
+               }
+       }
+
        ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
 
        drm_atomic_state_put(adev->dm.cached_state);