drm/udl: Unmap buffer object after damage update
drivers/gpu/drm/udl/udl_fb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 */

#include <linux/moduleparam.h>
#include <linux/dma-buf.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_modeset_helper.h>

#include "udl_drv.h"

#define DL_ALIGN_UP(x, a) ALIGN(x, a)
#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)

/** Read the red component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)

/** Read the green component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)

/** Read the blue component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)

/** Return red/green component of a 16 bpp colour number. */
#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)

/** Return green/blue component of a 16 bpp colour number. */
#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)

/** Return 8 bpp colour number from red, green and blue components. */
#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)

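/*
 * A minimal sketch of how the 16 bpp helpers combine, assuming the usual
 * RGB565 byte order (high byte RRRRRGGG, low byte GGGBBBBB):
 *
 *	uint16_t px16 = (DLO_RG16(red, grn) << 8) | DLO_GB16(grn, blu);
 *
 * The disabled rgb16() helper below builds the same value with '+'.
 */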
#if 0
static uint8_t rgb8(uint32_t col)
{
	uint8_t red = DLO_RGB_GETRED(col);
	uint8_t grn = DLO_RGB_GETGRN(col);
	uint8_t blu = DLO_RGB_GETBLU(col);

	return DLO_RGB8(red, grn, blu);
}

static uint16_t rgb16(uint32_t col)
{
	uint8_t red = DLO_RGB_GETRED(col);
	uint8_t grn = DLO_RGB_GETGRN(col);
	uint8_t blu = DLO_RGB_GETBLU(col);

	return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
}
#endif

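/*
 * Flush a damaged rectangle of the framebuffer to the device.
 *
 * The backing shmem object is vmapped for the duration of the update, each
 * scanline of the rectangle is rendered into URBs with udl_render_hline(),
 * any partially filled URB is submitted, and the mapping is released again
 * on every exit path (the out and err_drm_gem_shmem_vunmap labels).
 */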
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
		      int width, int height)
{
	struct drm_device *dev = fb->base.dev;
	struct udl_device *udl = to_udl(dev);
	int i, ret;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	struct urb *urb;
	int aligned_x;
	int log_bpp;
	void *vaddr;

	BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
	log_bpp = __ffs(fb->base.format->cpp[0]);

	if (!fb->active_16)
		return 0;

	vaddr = drm_gem_shmem_vmap(&fb->shmem->base);
	if (IS_ERR(vaddr)) {
		DRM_ERROR("failed to vmap fb\n");
		return 0;
	}

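	/*
	 * Round the left edge down to an unsigned long boundary and grow the
	 * width so the original damage stays covered; presumably this lets
	 * the rendering path below work on long-aligned spans.
	 */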
	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
	x = aligned_x;

	if ((width <= 0) ||
	    (x + width > fb->base.width) ||
	    (y + height > fb->base.height)) {
		ret = -EINVAL;
		goto err_drm_gem_shmem_vunmap;
	}

	start_cycles = get_cycles();

	urb = udl_get_urb(dev);
	if (!urb)
		goto out;
	cmd = urb->transfer_buffer;

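	/*
	 * byte_offset is the position of the damaged span within the vmapped
	 * shmem buffer (using the framebuffer pitch); dev_byte_offset is the
	 * linear position within the device's framebuffer (using its width).
	 * Both are scaled by bytes per pixel via log_bpp.
	 */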
	for (i = y; i < y + height ; i++) {
		const int line_offset = fb->base.pitches[0] * i;
		const int byte_offset = line_offset + (x << log_bpp);
		const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
		if (udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
				     &cmd, byte_offset, dev_byte_offset,
				     width << log_bpp,
				     &bytes_identical, &bytes_sent))
			goto error;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len;
		if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
			*cmd++ = 0xAF;
		len = cmd - (char *) urb->transfer_buffer;
		ret = udl_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		udl_urb_completion(urb);

error:
	atomic_add(bytes_sent, &udl->bytes_sent);
	atomic_add(bytes_identical, &udl->bytes_identical);
	atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &udl->cpu_kcycles_used);

out:
	drm_gem_shmem_vunmap(&fb->shmem->base, vaddr);

	return 0;

err_drm_gem_shmem_vunmap:
	drm_gem_shmem_vunmap(&fb->shmem->base, vaddr);
	return ret;
}

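/*
 * .dirty() callback for the framebuffer: under the modeset locks, bracket
 * the update with dma_buf_{begin,end}_cpu_access() when the buffer object
 * was imported through PRIME, and flush each clip rectangle with
 * udl_handle_damage().
 */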
static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
				      struct drm_file *file,
				      unsigned flags, unsigned color,
				      struct drm_clip_rect *clips,
				      unsigned num_clips)
{
	struct udl_framebuffer *ufb = to_udl_fb(fb);
	struct dma_buf_attachment *import_attach;
	int i;
	int ret = 0;

	drm_modeset_lock_all(fb->dev);

	if (!ufb->active_16)
		goto unlock;

	import_attach = ufb->shmem->base.import_attach;

	if (import_attach) {
		ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
					       DMA_FROM_DEVICE);
		if (ret)
			goto unlock;
	}

	for (i = 0; i < num_clips; i++) {
		ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
					clips[i].x2 - clips[i].x1,
					clips[i].y2 - clips[i].y1);
		if (ret)
			break;
	}

	if (import_attach)
		ret = dma_buf_end_cpu_access(import_attach->dmabuf,
					     DMA_FROM_DEVICE);

 unlock:
	drm_modeset_unlock_all(fb->dev);

	return ret;
}

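/*
 * For illustration only (assumed usage, not part of this driver): userspace
 * reaches the ->dirty() callback above through the DIRTYFB ioctl, e.g. via
 * libdrm:
 *
 *	drmModeClip clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
 *
 *	drmModeDirtyFB(fd, fb_id, &clip, 1);
 */
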
static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct udl_framebuffer *ufb = to_udl_fb(fb);

	if (ufb->shmem)
		drm_gem_object_put_unlocked(&ufb->shmem->base);

	drm_framebuffer_cleanup(fb);
	kfree(ufb);
}

static const struct drm_framebuffer_funcs udlfb_funcs = {
	.destroy = udl_user_framebuffer_destroy,
	.dirty = udl_user_framebuffer_dirty,
};


static int
udl_framebuffer_init(struct drm_device *dev,
		     struct udl_framebuffer *ufb,
		     const struct drm_mode_fb_cmd2 *mode_cmd,
		     struct drm_gem_shmem_object *shmem)
{
	int ret;

	ufb->shmem = shmem;
	drm_helper_mode_fill_fb_struct(dev, &ufb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
	return ret;
}

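/*
 * Validate and wrap a userspace-supplied GEM handle as a framebuffer.  The
 * requested size (pitches[0] * height, rounded up to a page) must fit in
 * the backing object; for example, a 1024x768 XRGB8888 buffer with a
 * 4096-byte pitch needs 4096 * 768 = 3 MiB of backing storage.
 */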
struct drm_framebuffer *
udl_fb_user_fb_create(struct drm_device *dev,
		      struct drm_file *file,
		      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct udl_framebuffer *ufb;
	int ret;
	uint32_t size;

	obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	size = mode_cmd->pitches[0] * mode_cmd->height;
	size = ALIGN(size, PAGE_SIZE);

	if (size > obj->size) {
		DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n",
			  size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
		return ERR_PTR(-ENOMEM);
	}

	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
	if (ufb == NULL)
		return ERR_PTR(-ENOMEM);

	ret = udl_framebuffer_init(dev, ufb, mode_cmd,
				   to_drm_gem_shmem_obj(obj));
	if (ret) {
		kfree(ufb);
		return ERR_PTR(-EINVAL);
	}
	return &ufb->base;
}