/* drm/kms: driver for the virtual cirrus adapter emulated under qemu */
/*
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *          Dave Airlie
 */
11#include "drmP.h"
12#include "drm.h"
13#include "drm_crtc_helper.h"
14
15#include "cirrus_drv.h"
16
17
18static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
19{
20 struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
21 if (cirrus_fb->obj)
22 drm_gem_object_unreference_unlocked(cirrus_fb->obj);
23 drm_framebuffer_cleanup(fb);
24 kfree(fb);
25}
26
27static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
28 struct drm_file *file_priv,
29 unsigned int *handle)
30{
31 return 0;
32}
33
/* Framebuffer vtable: teardown plus the (stub) GEM-handle export hook. */
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
	.destroy = cirrus_user_framebuffer_destroy,
	.create_handle = cirrus_user_framebuffer_create_handle,
};
38
39int cirrus_framebuffer_init(struct drm_device *dev,
40 struct cirrus_framebuffer *gfb,
41 struct drm_mode_fb_cmd2 *mode_cmd,
42 struct drm_gem_object *obj)
43{
44 int ret;
45
46 ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
47 if (ret) {
48 DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
49 return ret;
50 }
51 drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
52 gfb->obj = obj;
53 return 0;
54}
55
56static struct drm_framebuffer *
57cirrus_user_framebuffer_create(struct drm_device *dev,
58 struct drm_file *filp,
59 struct drm_mode_fb_cmd2 *mode_cmd)
60{
61 struct drm_gem_object *obj;
62 struct cirrus_framebuffer *cirrus_fb;
63 int ret;
64 u32 bpp, depth;
65
66 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
67 /* cirrus can't handle > 24bpp framebuffers at all */
68 if (bpp > 24)
69 return ERR_PTR(-EINVAL);
70
71 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
72 if (obj == NULL)
73 return ERR_PTR(-ENOENT);
74
75 cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
76 if (!cirrus_fb) {
77 drm_gem_object_unreference_unlocked(obj);
78 return ERR_PTR(-ENOMEM);
79 }
80
81 ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
82 if (ret) {
83 drm_gem_object_unreference_unlocked(obj);
84 kfree(cirrus_fb);
85 return ERR_PTR(ret);
86 }
87 return &cirrus_fb->base;
88}
89
/* Mode-config vtable: route framebuffer creation to our GEM-backed path. */
static const struct drm_mode_config_funcs cirrus_mode_funcs = {
	.fb_create = cirrus_user_framebuffer_create,
};
93
/* Unmap the framebuffer from the core and release the memory */
static void cirrus_vram_fini(struct cirrus_device *cdev)
{
	/*
	 * NOTE(review): despite the name, this unmaps the register
	 * aperture mapped in cirrus_device_init() -- cirrus_vram_init()
	 * below only reserves the VRAM region and never ioremaps it.
	 * Confirm the intended pairing before reorganizing teardown.
	 */
	iounmap(cdev->rmmio);
	cdev->rmmio = NULL;

	/* Drop the VRAM reservation taken by cirrus_vram_init(), if any. */
	if (cdev->mc.vram_base)
		release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
}
102
103/* Map the framebuffer from the card and configure the core */
104static int cirrus_vram_init(struct cirrus_device *cdev)
105{
106 /* BAR 0 is VRAM */
107 cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
108 /* We have 4MB of VRAM */
109 cdev->mc.vram_size = 4 * 1024 * 1024;
110
111 if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
112 "cirrusdrmfb_vram")) {
113 DRM_ERROR("can't reserve VRAM\n");
114 return -ENXIO;
115 }
116
117 return 0;
118}
119
/*
 * Our emulated hardware has two sets of memory. One is video RAM and can
 * simply be used as a linear framebuffer - the other provides mmio access
 * to the display registers. The latter can also be accessed via IO port
 * access, but we map the range and use mmio to program them instead
 */
127int cirrus_device_init(struct cirrus_device *cdev,
128 struct drm_device *ddev,
129 struct pci_dev *pdev, uint32_t flags)
130{
131 int ret;
132
133 cdev->dev = ddev;
134 cdev->flags = flags;
135
136 /* Hardcode the number of CRTCs to 1 */
137 cdev->num_crtc = 1;
138
139 /* BAR 0 is the framebuffer, BAR 1 contains registers */
140 cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
141 cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
142
143 if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
144 "cirrusdrmfb_mmio")) {
145 DRM_ERROR("can't reserve mmio registers\n");
146 return -ENOMEM;
147 }
148
149 cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
150
151 if (cdev->rmmio == NULL)
152 return -ENOMEM;
153
154 ret = cirrus_vram_init(cdev);
155 if (ret) {
156 release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
157 return ret;
158 }
159
160 return 0;
161}
162
/* Undo cirrus_device_init(): release the register-region reservation,
 * then unmap the registers and drop the VRAM reservation. */
void cirrus_device_fini(struct cirrus_device *cdev)
{
	release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
	cirrus_vram_fini(cdev);
}
168
/*
 * Functions here will be called by the core once it's bound the driver to
 * a PCI device
 */
173
174int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
175{
176 struct cirrus_device *cdev;
177 int r;
178
179 cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
180 if (cdev == NULL)
181 return -ENOMEM;
182 dev->dev_private = (void *)cdev;
183
184 r = cirrus_device_init(cdev, dev, dev->pdev, flags);
185 if (r) {
186 dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
187 goto out;
188 }
189
190 r = cirrus_mm_init(cdev);
191 if (r)
192 dev_err(&dev->pdev->dev, "fatal err on mm init\n");
193
194 r = cirrus_modeset_init(cdev);
195 if (r)
196 dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
197
198 dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
199out:
200 if (r)
201 cirrus_driver_unload(dev);
202 return r;
203}
204
205int cirrus_driver_unload(struct drm_device *dev)
206{
207 struct cirrus_device *cdev = dev->dev_private;
208
209 if (cdev == NULL)
210 return 0;
211 cirrus_modeset_fini(cdev);
212 cirrus_mm_fini(cdev);
213 cirrus_device_fini(cdev);
214 kfree(cdev);
215 dev->dev_private = NULL;
216 return 0;
217}
218
219int cirrus_gem_create(struct drm_device *dev,
220 u32 size, bool iskernel,
221 struct drm_gem_object **obj)
222{
223 struct cirrus_bo *cirrusbo;
224 int ret;
225
226 *obj = NULL;
227
228 size = roundup(size, PAGE_SIZE);
229 if (size == 0)
230 return -EINVAL;
231
232 ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
233 if (ret) {
234 if (ret != -ERESTARTSYS)
235 DRM_ERROR("failed to allocate GEM object\n");
236 return ret;
237 }
238 *obj = &cirrusbo->gem;
239 return 0;
240}
241
242int cirrus_dumb_create(struct drm_file *file,
243 struct drm_device *dev,
244 struct drm_mode_create_dumb *args)
245{
246 int ret;
247 struct drm_gem_object *gobj;
248 u32 handle;
249
250 args->pitch = args->width * ((args->bpp + 7) / 8);
251 args->size = args->pitch * args->height;
252
253 ret = cirrus_gem_create(dev, args->size, false,
254 &gobj);
255 if (ret)
256 return ret;
257
258 ret = drm_gem_handle_create(file, gobj, &handle);
259 drm_gem_object_unreference_unlocked(gobj);
260 if (ret)
261 return ret;
262
263 args->handle = handle;
264 return 0;
265}
266
/* Dumb-buffer destroy: dropping the handle drops its GEM reference; the
 * object itself is freed once the last reference goes away. */
int cirrus_dumb_destroy(struct drm_file *file,
			struct drm_device *dev,
			uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
273
/* GEM init_object callback: cirrus objects are fully set up in
 * cirrus_gem_create(), so reaching this path is a driver bug. */
int cirrus_gem_init_object(struct drm_gem_object *obj)
{
	BUG();
	return 0;
}
279
/* Drop a reference on *bo, NULLing the caller's pointer once the TTM
 * reference is gone.  NULL-safe. */
void cirrus_bo_unref(struct cirrus_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	/*
	 * NOTE(review): ttm_bo_unref() appears to always NULL its argument,
	 * which would make this test unconditionally true -- confirm against
	 * the TTM API before simplifying.
	 */
	if (tbo == NULL)
		*bo = NULL;

}
293
/* GEM free callback: release the cirrus BO backing @obj. */
void cirrus_gem_free_object(struct drm_gem_object *obj)
{
	struct cirrus_bo *bo = gem_to_cirrus_bo(obj);

	if (bo)
		cirrus_bo_unref(&bo);
}
302
303
/* mmap offset for @bo, as assigned by TTM when its address-space slot
 * was set up. */
static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
{
	return bo->bo.addr_space_offset;
}
308
309int
310cirrus_dumb_mmap_offset(struct drm_file *file,
311 struct drm_device *dev,
312 uint32_t handle,
313 uint64_t *offset)
314{
315 struct drm_gem_object *obj;
316 int ret;
317 struct cirrus_bo *bo;
318
319 mutex_lock(&dev->struct_mutex);
320 obj = drm_gem_object_lookup(dev, file, handle);
321 if (obj == NULL) {
322 ret = -ENOENT;
323 goto out_unlock;
324 }
325
326 bo = gem_to_cirrus_bo(obj);
327 *offset = cirrus_bo_mmap_offset(bo);
328
329 drm_gem_object_unreference(obj);
330 ret = 0;
331out_unlock:
332 mutex_unlock(&dev->struct_mutex);
333 return ret;
334
335}