/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	/* without multimon it's hard to resize */
	if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
	    (var->xres != par->max_width ||
	     var->yres != par->max_height)) {
		DRM_ERROR("Tried to resize, but we don't have multimon\n");
		return -EINVAL;
	}

	if (var->xres > par->max_width ||
	    var->yres > par->max_height) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
		vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
		vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
		vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
		vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
		vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

		/* TODO: check whether pitch and offset changed */

		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	} else {
		vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);

		/* TODO: check whether pitch and offset changed */
	}

	return 0;
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */

static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work;
		 * we share the delayed work with the defio system. */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay = VMW_DIRTY_DELAY,
	.deferred_io = vmw_deferred_io,
};

/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}

int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	initial_width = 800;
	initial_height = 600;

	fb_bpp = 32;
	fb_depth = 24;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
		fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
	} else {
		fb_width = min(vmw_priv->fb_max_width, initial_width);
		fb_height = min(vmw_priv->fb_max_height, initial_height);
	}

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

	fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
	fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);

	DRM_DEBUG("max width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
	DRM_DEBUG("max height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
	DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
	DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
	DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
	DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
	DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
	DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
	DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
	DRM_DEBUG("fb_pitch %u\n", fb_pitch);
	DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24-bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

#if 0
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret = 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}

int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret = 0;

	ne_placement.lpfn = bo->num_pages;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}

int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_scheduled_work();

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}

int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If there already was stuff dirty we won't
	 * schedule new work, so let's do it now. */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}