Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright © 2007 David Airlie | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
21 | * DEALINGS IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * David Airlie | |
25 | */ | |
26 | #include <linux/module.h> | |
27 | #include <linux/slab.h> | |
28 | #include <linux/fb.h> | |
29 | ||
30 | #include <drm/drmP.h> | |
31 | #include <drm/drm_crtc.h> | |
32 | #include <drm/drm_crtc_helper.h> | |
33 | #include <drm/amdgpu_drm.h> | |
34 | #include "amdgpu.h" | |
fbd76d59 | 35 | #include "cikd.h" |
d38ceaf9 AD |
36 | |
37 | #include <drm/drm_fb_helper.h> | |
38 | ||
39 | #include <linux/vga_switcheroo.h> | |
40 | ||
41 | /* object hierarchy - | |
42 | this contains a helper + a amdgpu fb | |
43 | the helper contains a pointer to amdgpu framebuffer baseclass. | |
44 | */ | |
/* Per-device fbdev emulation state. */
struct amdgpu_fbdev {
	/* Generic DRM fb helper; must stay the first member so that the
	 * drm_fb_helper pointer handed to callbacks can be cast straight
	 * back to struct amdgpu_fbdev (see amdgpufb_create). */
	struct drm_fb_helper helper;
	struct amdgpu_framebuffer rfb;	/* framebuffer backing the console */
	struct list_head fbdev_list;
	struct amdgpu_device *adev;	/* owning device */
};
51 | ||
/* fbdev entry points; everything is delegated to the generic DRM fb
 * helpers.  The cfb_* drawing variants render with the CPU through the
 * kmapped VRAM buffer set up in amdgpufb_create. */
static struct fb_ops amdgpufb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};
65 | ||
66 | ||
/* Round @width up to the pitch alignment required by the display block.
 * The alignment depends on the pixel size in bytes; an unrecognized bpp
 * gets no padding.  @adev and @tiled are currently unused but kept so
 * the signature matches the callers. */
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
{
	int bytes_pp = bpp / 8;
	int mask;

	if (bytes_pp == 1)
		mask = 255;
	else if (bytes_pp == 2)
		mask = 127;
	else if (bytes_pp == 3 || bytes_pp == 4)
		mask = 63;
	else
		mask = 0;

	return (width + mask) & ~mask;
}
89 | ||
/* Undo amdgpufb_create_pinned_object: unmap and unpin the BO, then drop
 * the GEM reference.  If the reservation fails the kmap/pin are left in
 * place (best effort), but the GEM reference is still released. */
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
	int ret;

	ret = amdgpu_bo_reserve(rbo, false);
	if (likely(ret == 0)) {
		amdgpu_bo_kunmap(rbo);
		amdgpu_bo_unpin(rbo);
		amdgpu_bo_unreserve(rbo);
	}
	drm_gem_object_unreference_unlocked(gobj);
}
103 | ||
104 | static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, | |
105 | struct drm_mode_fb_cmd2 *mode_cmd, | |
106 | struct drm_gem_object **gobj_p) | |
107 | { | |
108 | struct amdgpu_device *adev = rfbdev->adev; | |
109 | struct drm_gem_object *gobj = NULL; | |
110 | struct amdgpu_bo *rbo = NULL; | |
111 | bool fb_tiled = false; /* useful for testing */ | |
112 | u32 tiling_flags = 0; | |
113 | int ret; | |
114 | int aligned_size, size; | |
115 | int height = mode_cmd->height; | |
116 | u32 bpp, depth; | |
117 | ||
118 | drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); | |
119 | ||
120 | /* need to align pitch with crtc limits */ | |
121 | mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp, | |
122 | fb_tiled) * ((bpp + 1) / 8); | |
123 | ||
124 | height = ALIGN(mode_cmd->height, 8); | |
125 | size = mode_cmd->pitches[0] * height; | |
126 | aligned_size = ALIGN(size, PAGE_SIZE); | |
127 | ret = amdgpu_gem_object_create(adev, aligned_size, 0, | |
128 | AMDGPU_GEM_DOMAIN_VRAM, | |
857d913d AD |
129 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, |
130 | true, &gobj); | |
d38ceaf9 AD |
131 | if (ret) { |
132 | printk(KERN_ERR "failed to allocate framebuffer (%d)\n", | |
133 | aligned_size); | |
134 | return -ENOMEM; | |
135 | } | |
136 | rbo = gem_to_amdgpu_bo(gobj); | |
137 | ||
138 | if (fb_tiled) | |
fbd76d59 | 139 | tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1); |
d38ceaf9 | 140 | |
d38ceaf9 AD |
141 | ret = amdgpu_bo_reserve(rbo, false); |
142 | if (unlikely(ret != 0)) | |
143 | goto out_unref; | |
144 | ||
145 | if (tiling_flags) { | |
146 | ret = amdgpu_bo_set_tiling_flags(rbo, | |
63ab1c2b | 147 | tiling_flags); |
d38ceaf9 AD |
148 | if (ret) |
149 | dev_err(adev->dev, "FB failed to set tiling flags\n"); | |
150 | } | |
151 | ||
152 | ||
7e5a547f | 153 | ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL); |
d38ceaf9 AD |
154 | if (ret) { |
155 | amdgpu_bo_unreserve(rbo); | |
156 | goto out_unref; | |
157 | } | |
158 | ret = amdgpu_bo_kmap(rbo, NULL); | |
159 | amdgpu_bo_unreserve(rbo); | |
160 | if (ret) { | |
161 | goto out_unref; | |
162 | } | |
163 | ||
164 | *gobj_p = gobj; | |
165 | return 0; | |
166 | out_unref: | |
167 | amdgpufb_destroy_pinned_object(gobj); | |
168 | *gobj_p = NULL; | |
169 | return ret; | |
170 | } | |
171 | ||
172 | static int amdgpufb_create(struct drm_fb_helper *helper, | |
173 | struct drm_fb_helper_surface_size *sizes) | |
174 | { | |
175 | struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper; | |
176 | struct amdgpu_device *adev = rfbdev->adev; | |
177 | struct fb_info *info; | |
178 | struct drm_framebuffer *fb = NULL; | |
179 | struct drm_mode_fb_cmd2 mode_cmd; | |
180 | struct drm_gem_object *gobj = NULL; | |
181 | struct amdgpu_bo *rbo = NULL; | |
d38ceaf9 AD |
182 | int ret; |
183 | unsigned long tmp; | |
184 | ||
185 | mode_cmd.width = sizes->surface_width; | |
186 | mode_cmd.height = sizes->surface_height; | |
187 | ||
188 | if (sizes->surface_bpp == 24) | |
189 | sizes->surface_bpp = 32; | |
190 | ||
191 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | |
192 | sizes->surface_depth); | |
193 | ||
194 | ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj); | |
195 | if (ret) { | |
196 | DRM_ERROR("failed to create fbcon object %d\n", ret); | |
197 | return ret; | |
198 | } | |
199 | ||
200 | rbo = gem_to_amdgpu_bo(gobj); | |
201 | ||
202 | /* okay we have an object now allocate the framebuffer */ | |
2dbaf392 AT |
203 | info = drm_fb_helper_alloc_fbi(helper); |
204 | if (IS_ERR(info)) { | |
205 | ret = PTR_ERR(info); | |
d38ceaf9 AD |
206 | goto out_unref; |
207 | } | |
208 | ||
209 | info->par = rfbdev; | |
210 | ||
211 | ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); | |
212 | if (ret) { | |
213 | DRM_ERROR("failed to initialize framebuffer %d\n", ret); | |
2dbaf392 | 214 | goto out_destroy_fbi; |
d38ceaf9 AD |
215 | } |
216 | ||
217 | fb = &rfbdev->rfb.base; | |
218 | ||
219 | /* setup helper */ | |
220 | rfbdev->helper.fb = fb; | |
d38ceaf9 AD |
221 | |
222 | memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo)); | |
223 | ||
224 | strcpy(info->fix.id, "amdgpudrmfb"); | |
225 | ||
226 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | |
227 | ||
228 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; | |
229 | info->fbops = &amdgpufb_ops; | |
230 | ||
231 | tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start; | |
232 | info->fix.smem_start = adev->mc.aper_base + tmp; | |
233 | info->fix.smem_len = amdgpu_bo_size(rbo); | |
234 | info->screen_base = rbo->kptr; | |
235 | info->screen_size = amdgpu_bo_size(rbo); | |
236 | ||
237 | drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); | |
238 | ||
239 | /* setup aperture base/size for vesafb takeover */ | |
d38ceaf9 AD |
240 | info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; |
241 | info->apertures->ranges[0].size = adev->mc.aper_size; | |
242 | ||
243 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ | |
244 | ||
245 | if (info->screen_base == NULL) { | |
246 | ret = -ENOSPC; | |
2dbaf392 | 247 | goto out_destroy_fbi; |
d38ceaf9 AD |
248 | } |
249 | ||
250 | DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); | |
251 | DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base); | |
252 | DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo)); | |
253 | DRM_INFO("fb depth is %d\n", fb->depth); | |
254 | DRM_INFO(" pitch is %d\n", fb->pitches[0]); | |
255 | ||
256 | vga_switcheroo_client_fb_set(adev->ddev->pdev, info); | |
257 | return 0; | |
258 | ||
2dbaf392 AT |
259 | out_destroy_fbi: |
260 | drm_fb_helper_release_fbi(helper); | |
d38ceaf9 AD |
261 | out_unref: |
262 | if (rbo) { | |
263 | ||
264 | } | |
265 | if (fb && ret) { | |
266 | drm_gem_object_unreference(gobj); | |
267 | drm_framebuffer_unregister_private(fb); | |
268 | drm_framebuffer_cleanup(fb); | |
269 | kfree(fb); | |
270 | } | |
271 | return ret; | |
272 | } | |
273 | ||
274 | void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev) | |
275 | { | |
276 | if (adev->mode_info.rfbdev) | |
277 | drm_fb_helper_hotplug_event(&adev->mode_info.rfbdev->helper); | |
278 | } | |
279 | ||
/* Tear down the fbdev framebuffer and its backing BO.
 * Order matters: unregister/release the fb_info first so fbcon can no
 * longer touch the buffer, then free the BO, then clean up the helper
 * and the DRM framebuffer.  @dev is unused.  Always returns 0. */
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;

	drm_fb_helper_unregister_fbi(&rfbdev->helper);
	drm_fb_helper_release_fbi(&rfbdev->helper);

	if (rfb->obj) {
		amdgpufb_destroy_pinned_object(rfb->obj);
		rfb->obj = NULL;
	}
	drm_fb_helper_fini(&rfbdev->helper);
	drm_framebuffer_unregister_private(&rfb->base);
	drm_framebuffer_cleanup(&rfb->base);

	return 0;
}
297 | ||
298 | /** Sets the color ramps on behalf of fbcon */ | |
299 | static void amdgpu_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |
300 | u16 blue, int regno) | |
301 | { | |
302 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | |
303 | ||
304 | amdgpu_crtc->lut_r[regno] = red >> 6; | |
305 | amdgpu_crtc->lut_g[regno] = green >> 6; | |
306 | amdgpu_crtc->lut_b[regno] = blue >> 6; | |
307 | } | |
308 | ||
309 | /** Gets the color ramps on behalf of fbcon */ | |
310 | static void amdgpu_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | |
311 | u16 *blue, int regno) | |
312 | { | |
313 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | |
314 | ||
315 | *red = amdgpu_crtc->lut_r[regno] << 6; | |
316 | *green = amdgpu_crtc->lut_g[regno] << 6; | |
317 | *blue = amdgpu_crtc->lut_b[regno] << 6; | |
318 | } | |
319 | ||
/* fb helper callbacks: gamma ramp accessors for fbcon plus the probe
 * hook that builds the framebuffer. */
static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
	.gamma_set = amdgpu_crtc_fb_gamma_set,
	.gamma_get = amdgpu_crtc_fb_gamma_get,
	.fb_probe = amdgpufb_create,
};
325 | ||
326 | int amdgpu_fbdev_init(struct amdgpu_device *adev) | |
327 | { | |
328 | struct amdgpu_fbdev *rfbdev; | |
329 | int bpp_sel = 32; | |
330 | int ret; | |
331 | ||
332 | /* don't init fbdev on hw without DCE */ | |
333 | if (!adev->mode_info.mode_config_initialized) | |
334 | return 0; | |
335 | ||
336 | /* select 8 bpp console on low vram cards */ | |
337 | if (adev->mc.real_vram_size <= (32*1024*1024)) | |
338 | bpp_sel = 8; | |
339 | ||
340 | rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL); | |
341 | if (!rfbdev) | |
342 | return -ENOMEM; | |
343 | ||
344 | rfbdev->adev = adev; | |
345 | adev->mode_info.rfbdev = rfbdev; | |
346 | ||
347 | drm_fb_helper_prepare(adev->ddev, &rfbdev->helper, | |
348 | &amdgpu_fb_helper_funcs); | |
349 | ||
350 | ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper, | |
351 | adev->mode_info.num_crtc, | |
352 | AMDGPUFB_CONN_LIMIT); | |
353 | if (ret) { | |
354 | kfree(rfbdev); | |
355 | return ret; | |
356 | } | |
357 | ||
358 | drm_fb_helper_single_add_all_connectors(&rfbdev->helper); | |
359 | ||
360 | /* disable all the possible outputs/crtcs before entering KMS mode */ | |
361 | drm_helper_disable_unused_functions(adev->ddev); | |
362 | ||
363 | drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); | |
364 | return 0; | |
365 | } | |
366 | ||
367 | void amdgpu_fbdev_fini(struct amdgpu_device *adev) | |
368 | { | |
369 | if (!adev->mode_info.rfbdev) | |
370 | return; | |
371 | ||
372 | amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev); | |
373 | kfree(adev->mode_info.rfbdev); | |
374 | adev->mode_info.rfbdev = NULL; | |
375 | } | |
376 | ||
377 | void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) | |
378 | { | |
379 | if (adev->mode_info.rfbdev) | |
2dbaf392 AT |
380 | drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper, |
381 | state); | |
d38ceaf9 AD |
382 | } |
383 | ||
384 | int amdgpu_fbdev_total_size(struct amdgpu_device *adev) | |
385 | { | |
386 | struct amdgpu_bo *robj; | |
387 | int size = 0; | |
388 | ||
389 | if (!adev->mode_info.rfbdev) | |
390 | return 0; | |
391 | ||
392 | robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj); | |
393 | size += amdgpu_bo_size(robj); | |
394 | return size; | |
395 | } | |
396 | ||
397 | bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) | |
398 | { | |
399 | if (!adev->mode_info.rfbdev) | |
400 | return false; | |
401 | if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj)) | |
402 | return true; | |
403 | return false; | |
404 | } | |
8b7530b1 AD |
405 | |
406 | void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) | |
407 | { | |
408 | struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev; | |
409 | struct drm_fb_helper *fb_helper; | |
410 | int ret; | |
411 | ||
412 | if (!afbdev) | |
413 | return; | |
414 | ||
415 | fb_helper = &afbdev->helper; | |
416 | ||
417 | ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); | |
418 | if (ret) | |
419 | DRM_DEBUG("failed to restore crtc mode\n"); | |
420 | } |