Commit | Line | Data |
---|---|---|
45719127 AT |
1 | /* |
2 | * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver | |
3 | * | |
4 | * Copyright (c) 2013 Texas Instruments Inc. | |
5 | * David Griego, <dagriego@biglakesoftware.com> | |
6 | * Dale Farnsworth, <dale@farnsworth.org> | |
7 | * Archit Taneja, <archit@ti.com> | |
8 | * | |
9 | * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. | |
10 | * Pawel Osciak, <pawel@osciak.com> | |
11 | * Marek Szyprowski, <m.szyprowski@samsung.com> | |
12 | * | |
13 | * Based on the virtual v4l2-mem2mem example device | |
14 | * | |
15 | * This program is free software; you can redistribute it and/or modify it | |
16 | * under the terms of the GNU General Public License version 2 as published by | |
17 | * the Free Software Foundation | |
18 | */ | |
19 | ||
20 | #include <linux/delay.h> | |
21 | #include <linux/dma-mapping.h> | |
22 | #include <linux/err.h> | |
23 | #include <linux/fs.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/io.h> | |
26 | #include <linux/ioctl.h> | |
27 | #include <linux/module.h> | |
28 | #include <linux/platform_device.h> | |
29 | #include <linux/pm_runtime.h> | |
30 | #include <linux/sched.h> | |
31 | #include <linux/slab.h> | |
32 | #include <linux/videodev2.h> | |
a51cd8f5 | 33 | #include <linux/log2.h> |
45719127 AT |
34 | |
35 | #include <media/v4l2-common.h> | |
36 | #include <media/v4l2-ctrls.h> | |
37 | #include <media/v4l2-device.h> | |
38 | #include <media/v4l2-event.h> | |
39 | #include <media/v4l2-ioctl.h> | |
40 | #include <media/v4l2-mem2mem.h> | |
41 | #include <media/videobuf2-core.h> | |
42 | #include <media/videobuf2-dma-contig.h> | |
43 | ||
44 | #include "vpdma.h" | |
45 | #include "vpe_regs.h" | |
46 | ||
#define VPE_MODULE_NAME "vpe"

/* minimum and maximum frame sizes */
#define MIN_W		128
#define MIN_H		128
#define MAX_W		1920
#define MAX_H		1080

/* required alignments */
#define S_ALIGN		0	/* multiple of 1 */
#define H_ALIGN		1	/* multiple of 2 */

/* flags that indicate a format can be used for capture/output */
#define VPE_FMT_TYPE_CAPTURE	(1 << 0)
#define VPE_FMT_TYPE_OUTPUT	(1 << 1)

/* used as plane indices */
#define VPE_MAX_PLANES	2
#define VPE_LUMA	0
#define VPE_CHROMA	1

/* per m2m context info */
#define VPE_MAX_SRC_BUFS	3	/* need 3 src fields to de-interlace */

#define VPE_DEF_BUFS_PER_JOB	1	/* default one buffer per batch job */

/*
 * each VPE context can need up to 3 config descriptors, 7 input descriptors,
 * 3 output descriptors, and 10 control descriptors
 */
#define VPE_DESC_LIST_SIZE	(10 * VPDMA_DTD_DESC_SIZE +	\
					13 * VPDMA_CFD_CTD_DESC_SIZE)

/* log helpers routed through the v4l2 device's parent struct device */
#define vpe_dbg(vpedev, fmt, arg...)	\
		dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
#define vpe_err(vpedev, fmt, arg...)	\
		dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
84 | ||
/*
 * Upsampler filter coefficients for one input mode.
 *
 * NOTE: set_us_coefficients() walks this struct as a flat array of
 * unsigned shorts (c0..c3 pairs per register), so the field order here
 * must match the US register layout — do not reorder fields.
 */
struct vpe_us_coeffs {
	unsigned short	anchor_fid0_c0;
	unsigned short	anchor_fid0_c1;
	unsigned short	anchor_fid0_c2;
	unsigned short	anchor_fid0_c3;
	unsigned short	interp_fid0_c0;
	unsigned short	interp_fid0_c1;
	unsigned short	interp_fid0_c2;
	unsigned short	interp_fid0_c3;
	unsigned short	anchor_fid1_c0;
	unsigned short	anchor_fid1_c1;
	unsigned short	anchor_fid1_c2;
	unsigned short	anchor_fid1_c3;
	unsigned short	interp_fid1_c0;
	unsigned short	interp_fid1_c1;
	unsigned short	interp_fid1_c2;
	unsigned short	interp_fid1_c3;
};
103 | ||
/*
 * Default upsampler coefficients: entry [0] is used for progressive
 * sources, entry [1] for interlaced sources (see set_us_coefficients()).
 */
static const struct vpe_us_coeffs us_coeffs[] = {
	{
		/* Coefficients for progressive input */
		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
	},
	{
		/* Coefficients for Top Field Interlaced input */
		0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
		/* Coefficients for Bottom Field Interlaced input */
		0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
	},
};
120 | ||
/*
 * the following registers are for configuring some of the parameters of the
 * motion and edge detection blocks inside DEI, these generally remain the same,
 * these could be passed later via userspace if some one needs to tweak these.
 *
 * (note: "spacial" below is a historical misspelling kept for the identifier)
 */
struct vpe_dei_regs {
	unsigned long mdt_spacial_freq_thr_reg;		/* VPE_DEI_REG2 */
	unsigned long edi_config_reg;			/* VPE_DEI_REG3 */
	unsigned long edi_lut_reg0;			/* VPE_DEI_REG4 */
	unsigned long edi_lut_reg1;			/* VPE_DEI_REG5 */
	unsigned long edi_lut_reg2;			/* VPE_DEI_REG6 */
	unsigned long edi_lut_reg3;			/* VPE_DEI_REG7 */
};
134 | ||
/*
 * default expert DEI register values, unlikely to be modified.
 * Positional initializers: one value per field of struct vpe_dei_regs,
 * i.e. DEI_REG2 through DEI_REG7.
 */
static const struct vpe_dei_regs dei_regs = {
	0x020C0804u,
	0x0118100Fu,
	0x08040200u,
	0x1010100Cu,
	0x10101010u,
	0x10101010u,
};
146 | ||
/*
 * The port_data structure contains per-port data.
 */
struct vpe_port_data {
	enum vpdma_channel channel;	/* VPDMA channel */
	u8	vb_index;		/* input frame f, f-1, f-2 index */
	u8	vb_part;		/* plane index for co-planar formats */
};
155 | ||
/*
 * Define indices into the port_data tables.
 * Ports 0-5 are the three luma/chroma input pairs (fields f, f-1, f-2),
 * 6-7 are the motion vector in/out ports, 8-10 are the output ports.
 */
#define VPE_PORT_LUMA1_IN	0
#define VPE_PORT_CHROMA1_IN	1
#define VPE_PORT_LUMA2_IN	2
#define VPE_PORT_CHROMA2_IN	3
#define VPE_PORT_LUMA3_IN	4
#define VPE_PORT_CHROMA3_IN	5
#define VPE_PORT_MV_IN		6
#define VPE_PORT_MV_OUT		7
#define VPE_PORT_LUMA_OUT	8
#define VPE_PORT_CHROMA_OUT	9
#define VPE_PORT_RGB_OUT	10
170 | ||
/*
 * Static per-port configuration, indexed by the VPE_PORT_* defines above.
 * vb_index selects which of the three held input buffers feeds the port;
 * the MV ports have no video buffer association.
 */
static const struct vpe_port_data port_data[11] = {
	[VPE_PORT_LUMA1_IN] = {
		.channel	= VPE_CHAN_LUMA1_IN,
		.vb_index	= 0,
		.vb_part	= VPE_LUMA,
	},
	[VPE_PORT_CHROMA1_IN] = {
		.channel	= VPE_CHAN_CHROMA1_IN,
		.vb_index	= 0,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_LUMA2_IN] = {
		.channel	= VPE_CHAN_LUMA2_IN,
		.vb_index	= 1,
		.vb_part	= VPE_LUMA,
	},
	[VPE_PORT_CHROMA2_IN] = {
		.channel	= VPE_CHAN_CHROMA2_IN,
		.vb_index	= 1,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_LUMA3_IN] = {
		.channel	= VPE_CHAN_LUMA3_IN,
		.vb_index	= 2,
		.vb_part	= VPE_LUMA,
	},
	[VPE_PORT_CHROMA3_IN] = {
		.channel	= VPE_CHAN_CHROMA3_IN,
		.vb_index	= 2,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_MV_IN] = {
		.channel	= VPE_CHAN_MV_IN,
	},
	[VPE_PORT_MV_OUT] = {
		.channel	= VPE_CHAN_MV_OUT,
	},
	[VPE_PORT_LUMA_OUT] = {
		.channel	= VPE_CHAN_LUMA_OUT,
		.vb_part	= VPE_LUMA,
	},
	[VPE_PORT_CHROMA_OUT] = {
		.channel	= VPE_CHAN_CHROMA_OUT,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_RGB_OUT] = {
		.channel	= VPE_CHAN_RGB_OUT,
		.vb_part	= VPE_LUMA,
	},
};
221 | ||
222 | ||
/* driver info for each of the supported video formats */
struct vpe_fmt {
	char	*name;			/* human-readable name */
	u32	fourcc;			/* standard format identifier */
	u8	types;			/* CAPTURE and/or OUTPUT */
	u8	coplanar;		/* set for unpacked Luma and Chroma */
	/* vpdma format info for each plane */
	struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
};
232 | ||
/*
 * Table of the pixel formats the driver supports; searched by fourcc in
 * find_format(). Co-planar formats carry a second VPDMA descriptor for
 * the chroma plane.
 */
static struct vpe_fmt vpe_formats[] = {
	{
		.name		= "YUV 422 co-planar",
		.fourcc		= V4L2_PIX_FMT_NV16,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.coplanar	= 1,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
				  },
	},
	{
		.name		= "YUV 420 co-planar",
		.fourcc		= V4L2_PIX_FMT_NV12,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.coplanar	= 1,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
				  },
	},
	{
		.name		= "YUYV 422 packed",
		.fourcc		= V4L2_PIX_FMT_YUYV,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
				  },
	},
	{
		.name		= "UYVY 422 packed",
		.fourcc		= V4L2_PIX_FMT_UYVY,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
				  },
	},
};
269 | ||
/*
 * per-queue, driver-specific private data.
 * there is one source queue and one destination queue for each m2m context.
 */
struct vpe_q_data {
	unsigned int		width;				/* frame width */
	unsigned int		height;				/* frame height */
	unsigned int		bytesperline[VPE_MAX_PLANES];	/* bytes per line in memory */
	enum v4l2_colorspace	colorspace;
	enum v4l2_field		field;				/* supported field value */
	unsigned int		flags;				/* Q_DATA_* bits below */
	unsigned int		sizeimage[VPE_MAX_PLANES];	/* image size in memory */
	struct v4l2_rect	c_rect;				/* crop/compose rectangle */
	struct vpe_fmt		*fmt;				/* format info */
};

/* vpe_q_data flag bits */
#define	Q_DATA_FRAME_1D		(1 << 0)	/* 1-D (non-tiled) memory layout */
#define	Q_DATA_MODE_TILED	(1 << 1)	/* tiled memory layout */
#define	Q_DATA_INTERLACED	(1 << 2)	/* queue carries interlaced content */

/* indices into vpe_ctx->q_data[] */
enum {
	Q_DATA_SRC = 0,
	Q_DATA_DST = 1,
};
295 | ||
296 | /* find our format description corresponding to the passed v4l2_format */ | |
297 | static struct vpe_fmt *find_format(struct v4l2_format *f) | |
298 | { | |
299 | struct vpe_fmt *fmt; | |
300 | unsigned int k; | |
301 | ||
302 | for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) { | |
303 | fmt = &vpe_formats[k]; | |
304 | if (fmt->fourcc == f->fmt.pix.pixelformat) | |
305 | return fmt; | |
306 | } | |
307 | ||
308 | return NULL; | |
309 | } | |
310 | ||
/*
 * there is one vpe_dev structure in the driver, it is shared by
 * all instances.
 */
struct vpe_dev {
	struct v4l2_device	v4l2_dev;
	struct video_device	vfd;
	struct v4l2_m2m_dev	*m2m_dev;

	atomic_t		num_instances;	/* count of driver instances */
	dma_addr_t		loaded_mmrs;	/* shadow mmrs in device */
	struct mutex		dev_mutex;	/* serializes ioctls across contexts */
	spinlock_t		lock;		/* protects per-context buffer lists */

	int			irq;
	void __iomem		*base;		/* VPE register space */

	struct vb2_alloc_ctx	*alloc_ctx;
	struct vpdma_data	*vpdma;		/* vpdma data handle */
};
331 | ||
/*
 * There is one vpe_ctx structure for each m2m context.
 */
struct vpe_ctx {
	struct v4l2_fh		fh;
	struct vpe_dev		*dev;
	struct v4l2_m2m_ctx	*m2m_ctx;
	struct v4l2_ctrl_handler hdl;

	unsigned int		field;			/* current field */
	unsigned int		sequence;		/* current frame/field seq */
	unsigned int		aborting;		/* abort after next irq */

	unsigned int		bufs_per_job;		/* input buffers per batch */
	unsigned int		bufs_completed;		/* bufs done in this batch */

	struct vpe_q_data	q_data[2];		/* src & dst queue data */
	struct vb2_buffer	*src_vbs[VPE_MAX_SRC_BUFS];	/* held input fields f, f-1, f-2 */
	struct vb2_buffer	*dst_vb;

	dma_addr_t		mv_buf_dma[2];		/* dma addrs of motion vector in/out bufs */
	void			*mv_buf[2];		/* virtual addrs of motion vector bufs */
	size_t			mv_buf_size;		/* current motion vector buffer size */
	struct vpdma_buf	mmr_adb;		/* shadow reg addr/data block */
	struct vpdma_desc_list	desc_list;		/* DMA descriptor list */

	bool			deinterlacing;		/* using de-interlacer */
	bool			load_mmrs;		/* have new shadow reg values */

	/* which mv_buf[] entry is the current *source*; the other is dest */
	unsigned int		src_mv_buf_selector;
};
363 | ||
364 | ||
365 | /* | |
366 | * M2M devices get 2 queues. | |
367 | * Return the queue given the type. | |
368 | */ | |
369 | static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx, | |
370 | enum v4l2_buf_type type) | |
371 | { | |
372 | switch (type) { | |
373 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: | |
374 | return &ctx->q_data[Q_DATA_SRC]; | |
375 | case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: | |
376 | return &ctx->q_data[Q_DATA_DST]; | |
377 | default: | |
378 | BUG(); | |
379 | } | |
380 | return NULL; | |
381 | } | |
382 | ||
/* read a 32-bit VPE register at the given byte offset */
static u32 read_reg(struct vpe_dev *dev, int offset)
{
	return ioread32(dev->base + offset);
}
387 | ||
/* write a 32-bit VPE register at the given byte offset */
static void write_reg(struct vpe_dev *dev, int offset, u32 value)
{
	iowrite32(value, dev->base + offset);
}
392 | ||
393 | /* register field read/write helpers */ | |
394 | static int get_field(u32 value, u32 mask, int shift) | |
395 | { | |
396 | return (value & (mask << shift)) >> shift; | |
397 | } | |
398 | ||
/* read a register and extract the (mask << shift) field from it */
static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
{
	return get_field(read_reg(dev, offset), mask, shift);
}
403 | ||
404 | static void write_field(u32 *valp, u32 field, u32 mask, int shift) | |
405 | { | |
406 | u32 val = *valp; | |
407 | ||
408 | val &= ~(mask << shift); | |
409 | val |= (field & mask) << shift; | |
410 | *valp = val; | |
411 | } | |
412 | ||
413 | static void write_field_reg(struct vpe_dev *dev, int offset, u32 field, | |
414 | u32 mask, int shift) | |
415 | { | |
416 | u32 val = read_reg(dev, offset); | |
417 | ||
418 | write_field(&val, field, mask, shift); | |
419 | ||
420 | write_reg(dev, offset, val); | |
421 | } | |
422 | ||
/*
 * DMA address/data block for the shadow registers.
 * Each vpdma_adb_hdr is followed by the register payload it describes;
 * the pad arrays keep each payload a multiple of 4 words as VPDMA requires.
 */
struct vpe_mmr_adb {
	struct vpdma_adb_hdr	out_fmt_hdr;
	u32			out_fmt_reg[1];
	u32			out_fmt_pad[3];
	struct vpdma_adb_hdr	us1_hdr;
	u32			us1_regs[8];
	struct vpdma_adb_hdr	us2_hdr;
	u32			us2_regs[8];
	struct vpdma_adb_hdr	us3_hdr;
	u32			us3_regs[8];
	struct vpdma_adb_hdr	dei_hdr;
	u32			dei_regs[8];
	struct vpdma_adb_hdr	sc_hdr;
	u32			sc_regs[1];
	u32			sc_pad[3];
	struct vpdma_adb_hdr	csc_hdr;
	u32			csc_regs[6];
	u32			csc_pad[2];
};
445 | ||
/* fill one ADB header inside this context's mmr_adb buffer */
#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a)	\
	VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)

/*
 * Set the headers for all of the address/data block structures.
 * Each header maps a regs[] payload in vpe_mmr_adb to its VPE MMR offset.
 */
static void init_adb_hdrs(struct vpe_ctx *ctx)
{
	VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
	VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
	VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
	VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
	VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr, sc_regs, VPE_SC_MP_SC0);
	VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00);
};
461 | ||
585e6f01 AT |
462 | /* |
463 | * Allocate or re-allocate the motion vector DMA buffers | |
464 | * There are two buffers, one for input and one for output. | |
465 | * However, the roles are reversed after each field is processed. | |
466 | * In other words, after each field is processed, the previous | |
467 | * output (dst) MV buffer becomes the new input (src) MV buffer. | |
468 | */ | |
469 | static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size) | |
470 | { | |
471 | struct device *dev = ctx->dev->v4l2_dev.dev; | |
472 | ||
473 | if (ctx->mv_buf_size == size) | |
474 | return 0; | |
475 | ||
476 | if (ctx->mv_buf[0]) | |
477 | dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0], | |
478 | ctx->mv_buf_dma[0]); | |
479 | ||
480 | if (ctx->mv_buf[1]) | |
481 | dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1], | |
482 | ctx->mv_buf_dma[1]); | |
483 | ||
484 | if (size == 0) | |
485 | return 0; | |
486 | ||
487 | ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0], | |
488 | GFP_KERNEL); | |
489 | if (!ctx->mv_buf[0]) { | |
490 | vpe_err(ctx->dev, "failed to allocate motion vector buffer\n"); | |
491 | return -ENOMEM; | |
492 | } | |
493 | ||
494 | ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1], | |
495 | GFP_KERNEL); | |
496 | if (!ctx->mv_buf[1]) { | |
497 | vpe_err(ctx->dev, "failed to allocate motion vector buffer\n"); | |
498 | dma_free_coherent(dev, size, ctx->mv_buf[0], | |
499 | ctx->mv_buf_dma[0]); | |
500 | ||
501 | return -ENOMEM; | |
502 | } | |
503 | ||
504 | ctx->mv_buf_size = size; | |
505 | ctx->src_mv_buf_selector = 0; | |
506 | ||
507 | return 0; | |
508 | } | |
509 | ||
/* release both motion vector buffers (realloc to size 0) */
static void free_mv_buffers(struct vpe_ctx *ctx)
{
	realloc_mv_buffers(ctx, 0);
}
514 | ||
515 | /* | |
516 | * While de-interlacing, we keep the two most recent input buffers | |
517 | * around. This function frees those two buffers when we have | |
518 | * finished processing the current stream. | |
519 | */ | |
520 | static void free_vbs(struct vpe_ctx *ctx) | |
521 | { | |
522 | struct vpe_dev *dev = ctx->dev; | |
523 | unsigned long flags; | |
524 | ||
525 | if (ctx->src_vbs[2] == NULL) | |
526 | return; | |
527 | ||
528 | spin_lock_irqsave(&dev->lock, flags); | |
529 | if (ctx->src_vbs[2]) { | |
530 | v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE); | |
531 | v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE); | |
532 | } | |
533 | spin_unlock_irqrestore(&dev->lock, flags); | |
534 | } | |
535 | ||
45719127 AT |
536 | /* |
537 | * Enable or disable the VPE clocks | |
538 | */ | |
539 | static void vpe_set_clock_enable(struct vpe_dev *dev, bool on) | |
540 | { | |
541 | u32 val = 0; | |
542 | ||
543 | if (on) | |
544 | val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE; | |
545 | write_reg(dev, VPE_CLK_ENABLE, val); | |
546 | } | |
547 | ||
/* pulse the data-path reset bit: assert, settle, then de-assert */
static void vpe_top_reset(struct vpe_dev *dev)
{

	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
		VPE_DATA_PATH_CLK_RESET_SHIFT);

	usleep_range(100, 150);

	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
		VPE_DATA_PATH_CLK_RESET_SHIFT);
}
559 | ||
/* pulse the VPDMA reset bit: assert, settle, then de-assert */
static void vpe_top_vpdma_reset(struct vpe_dev *dev)
{
	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
		VPE_VPDMA_CLK_RESET_SHIFT);

	usleep_range(100, 150);

	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
		VPE_VPDMA_CLK_RESET_SHIFT);
}
570 | ||
/*
 * Load the correct set of upsampler coefficients into the shadow MMRs
 */
static void set_us_coefficients(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	u32 *us1_reg = &mmr_adb->us1_regs[0];
	u32 *us2_reg = &mmr_adb->us2_regs[0];
	u32 *us3_reg = &mmr_adb->us3_regs[0];
	const unsigned short *cp, *end_cp;

	cp = &us_coeffs[0].anchor_fid0_c0;

	/* interlaced sources use the second entry of the coefficient table */
	if (s_q_data->flags & Q_DATA_INTERLACED)	/* interlaced */
		cp += sizeof(us_coeffs[0]) / sizeof(*cp);

	end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);

	/*
	 * pack two coefficients (C0, C1) into each US1 register, then mirror
	 * the finished register value into US2 and US3 before advancing
	 */
	while (cp < end_cp) {
		write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
		write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
		*us2_reg++ = *us1_reg;
		*us3_reg++ = *us1_reg++;
	}
	ctx->load_mmrs = true;
}
598 | ||
/*
 * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
 */
static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
{
	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *us1_reg0 = &mmr_adb->us1_regs[0];
	u32 *us2_reg0 = &mmr_adb->us2_regs[0];
	u32 *us3_reg0 = &mmr_adb->us3_regs[0];
	int line_mode = 1;
	int cfg_mode = 1;

	/*
	 * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
	 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
	 */

	if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
		cfg_mode = 0;
		line_mode = 0;		/* double lines to line buffer */
	}

	/* program the same config mode into all three upsamplers */
	write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
	write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
	write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);

	/* regs for now */
	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);

	/* frame start for input luma */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_LUMA1_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_LUMA2_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_LUMA3_IN);

	/* frame start for input chroma */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_CHROMA1_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_CHROMA2_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_CHROMA3_IN);

	/* frame start for MV in client */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_MV_IN);

	ctx->load_mmrs = true;
}
653 | ||
/*
 * Set the shadow registers that are modified when the source
 * format changes.
 */
static void set_src_registers(struct vpe_ctx *ctx)
{
	set_us_coefficients(ctx);
}
662 | ||
663 | /* | |
664 | * Set the shadow registers that are modified when the destination | |
665 | * format changes. | |
666 | */ | |
667 | static void set_dst_registers(struct vpe_ctx *ctx) | |
668 | { | |
669 | struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; | |
670 | struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt; | |
671 | u32 val = 0; | |
672 | ||
673 | /* select RGB path when color space conversion is supported in future */ | |
674 | if (fmt->fourcc == V4L2_PIX_FMT_RGB24) | |
675 | val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER; | |
676 | else if (fmt->fourcc == V4L2_PIX_FMT_NV16) | |
677 | val |= VPE_COLOR_SEPARATE_422; | |
678 | ||
679 | /* The source of CHR_DS is always the scaler, whether it's used or not */ | |
680 | val |= VPE_DS_SRC_DEI_SCALER; | |
681 | ||
682 | if (fmt->fourcc != V4L2_PIX_FMT_NV12) | |
683 | val |= VPE_DS_BYPASS; | |
684 | ||
685 | mmr_adb->out_fmt_reg[0] = val; | |
686 | ||
687 | ctx->load_mmrs = true; | |
688 | } | |
689 | ||
690 | /* | |
691 | * Set the de-interlacer shadow register values | |
692 | */ | |
585e6f01 | 693 | static void set_dei_regs(struct vpe_ctx *ctx) |
45719127 AT |
694 | { |
695 | struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; | |
696 | struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; | |
697 | unsigned int src_h = s_q_data->c_rect.height; | |
698 | unsigned int src_w = s_q_data->c_rect.width; | |
699 | u32 *dei_mmr0 = &mmr_adb->dei_regs[0]; | |
585e6f01 | 700 | bool deinterlace = true; |
45719127 AT |
701 | u32 val = 0; |
702 | ||
703 | /* | |
704 | * according to TRM, we should set DEI in progressive bypass mode when | |
705 | * the input content is progressive, however, DEI is bypassed correctly | |
706 | * for both progressive and interlace content in interlace bypass mode. | |
707 | * It has been recommended not to use progressive bypass mode. | |
708 | */ | |
585e6f01 AT |
709 | if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) || |
710 | !(s_q_data->flags & Q_DATA_INTERLACED)) { | |
711 | deinterlace = false; | |
712 | val = VPE_DEI_INTERLACE_BYPASS; | |
713 | } | |
714 | ||
715 | src_h = deinterlace ? src_h * 2 : src_h; | |
45719127 AT |
716 | |
717 | val |= (src_h << VPE_DEI_HEIGHT_SHIFT) | | |
718 | (src_w << VPE_DEI_WIDTH_SHIFT) | | |
719 | VPE_DEI_FIELD_FLUSH; | |
720 | ||
721 | *dei_mmr0 = val; | |
722 | ||
723 | ctx->load_mmrs = true; | |
724 | } | |
725 | ||
585e6f01 AT |
/* load the default expert DEI register values (DEI_REG2..REG7) */
static void set_dei_shadow_registers(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *dei_mmr = &mmr_adb->dei_regs[0];
	const struct vpe_dei_regs *cur = &dei_regs;

	/* dei_regs[0..1] hold frame size/config and are set elsewhere */
	dei_mmr[2]  = cur->mdt_spacial_freq_thr_reg;
	dei_mmr[3]  = cur->edi_config_reg;
	dei_mmr[4]  = cur->edi_lut_reg0;
	dei_mmr[5]  = cur->edi_lut_reg1;
	dei_mmr[6]  = cur->edi_lut_reg2;
	dei_mmr[7]  = cur->edi_lut_reg3;

	ctx->load_mmrs = true;
}
741 | ||
45719127 AT |
/* put the color space converter in bypass mode (CSC not supported yet) */
static void set_csc_coeff_bypass(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5];

	*shadow_csc_reg5 |= VPE_CSC_BYPASS;

	ctx->load_mmrs = true;
}
751 | ||
752 | static void set_sc_regs_bypass(struct vpe_ctx *ctx) | |
753 | { | |
754 | struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; | |
755 | u32 *sc_reg0 = &mmr_adb->sc_regs[0]; | |
756 | u32 val = 0; | |
757 | ||
758 | val |= VPE_SC_BYPASS; | |
759 | *sc_reg0 = val; | |
760 | ||
761 | ctx->load_mmrs = true; | |
762 | } | |
763 | ||
/*
 * Set the shadow registers whose values are modified when either the
 * source or destination format is changed.
 * Returns 0 on success or -ENOMEM if MV buffer allocation fails.
 */
static int set_srcdst_params(struct vpe_ctx *ctx)
{
	struct vpe_q_data *s_q_data =  &ctx->q_data[Q_DATA_SRC];
	struct vpe_q_data *d_q_data =  &ctx->q_data[Q_DATA_DST];
	size_t mv_buf_size;
	int ret;

	ctx->sequence = 0;
	ctx->field = V4L2_FIELD_TOP;

	/* de-interlace only when the source is interlaced and dst is not */
	if ((s_q_data->flags & Q_DATA_INTERLACED) &&
			!(d_q_data->flags & Q_DATA_INTERLACED)) {
		int bytes_per_line;
		const struct vpdma_data_format *mv =
			&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];

		ctx->deinterlacing = 1;
		/*
		 * we make sure that the source image has a 16 byte aligned
		 * stride, we need to do the same for the motion vector buffer
		 * by aligning its stride to the next 16 byte boundary. this
		 * extra space will not be used by the de-interlacer, but will
		 * ensure that vpdma operates correctly
		 */
		bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
					VPDMA_STRIDE_ALIGN);
		mv_buf_size = bytes_per_line * s_q_data->height;
	} else {
		ctx->deinterlacing = 0;
		mv_buf_size = 0;
	}

	free_vbs(ctx);

	ret = realloc_mv_buffers(ctx, mv_buf_size);
	if (ret)
		return ret;

	set_cfg_and_line_modes(ctx);
	set_dei_regs(ctx);
	set_csc_coeff_bypass(ctx);
	set_sc_regs_bypass(ctx);

	return 0;
}
813 | ||
/*
 * Return the vpe_ctx structure for a given struct file
 */
static struct vpe_ctx *file2ctx(struct file *file)
{
	return container_of(file->private_data, struct vpe_ctx, fh);
}
821 | ||
822 | /* | |
823 | * mem2mem callbacks | |
824 | */ | |
825 | ||
826 | /** | |
827 | * job_ready() - check whether an instance is ready to be scheduled to run | |
828 | */ | |
829 | static int job_ready(void *priv) | |
830 | { | |
831 | struct vpe_ctx *ctx = priv; | |
832 | int needed = ctx->bufs_per_job; | |
833 | ||
585e6f01 AT |
834 | if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) |
835 | needed += 2; /* need additional two most recent fields */ | |
836 | ||
45719127 AT |
837 | if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed) |
838 | return 0; | |
839 | ||
840 | return 1; | |
841 | } | |
842 | ||
/* request that the running transaction be cancelled */
static void job_abort(void *priv)
{
	struct vpe_ctx *ctx = priv;

	/* Will cancel the transaction in the next interrupt handler */
	ctx->aborting = 1;
}
850 | ||
/*
 * Lock access to the device
 */
static void vpe_lock(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct vpe_dev *dev = ctx->dev;
	mutex_lock(&dev->dev_mutex);
}
860 | ||
/* release the device lock taken in vpe_lock() */
static void vpe_unlock(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct vpe_dev *dev = ctx->dev;
	mutex_unlock(&dev->dev_mutex);
}
867 | ||
/* dump every documented VPE register to the debug log (debugging aid only) */
static void vpe_dump_regs(struct vpe_dev *dev)
{
#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))

	vpe_dbg(dev, "VPE Registers:\n");

	DUMPREG(PID);
	DUMPREG(SYSCONFIG);
	DUMPREG(INT0_STATUS0_RAW);
	DUMPREG(INT0_STATUS0);
	DUMPREG(INT0_ENABLE0);
	DUMPREG(INT0_STATUS1_RAW);
	DUMPREG(INT0_STATUS1);
	DUMPREG(INT0_ENABLE1);
	DUMPREG(CLK_ENABLE);
	DUMPREG(CLK_RESET);
	DUMPREG(CLK_FORMAT_SELECT);
	DUMPREG(CLK_RANGE_MAP);
	DUMPREG(US1_R0);
	DUMPREG(US1_R1);
	DUMPREG(US1_R2);
	DUMPREG(US1_R3);
	DUMPREG(US1_R4);
	DUMPREG(US1_R5);
	DUMPREG(US1_R6);
	DUMPREG(US1_R7);
	DUMPREG(US2_R0);
	DUMPREG(US2_R1);
	DUMPREG(US2_R2);
	DUMPREG(US2_R3);
	DUMPREG(US2_R4);
	DUMPREG(US2_R5);
	DUMPREG(US2_R6);
	DUMPREG(US2_R7);
	DUMPREG(US3_R0);
	DUMPREG(US3_R1);
	DUMPREG(US3_R2);
	DUMPREG(US3_R3);
	DUMPREG(US3_R4);
	DUMPREG(US3_R5);
	DUMPREG(US3_R6);
	DUMPREG(US3_R7);
	DUMPREG(DEI_FRAME_SIZE);
	DUMPREG(MDT_BYPASS);
	DUMPREG(MDT_SF_THRESHOLD);
	DUMPREG(EDI_CONFIG);
	DUMPREG(DEI_EDI_LUT_R0);
	DUMPREG(DEI_EDI_LUT_R1);
	DUMPREG(DEI_EDI_LUT_R2);
	DUMPREG(DEI_EDI_LUT_R3);
	DUMPREG(DEI_FMD_WINDOW_R0);
	DUMPREG(DEI_FMD_WINDOW_R1);
	DUMPREG(DEI_FMD_CONTROL_R0);
	DUMPREG(DEI_FMD_CONTROL_R1);
	DUMPREG(DEI_FMD_STATUS_R0);
	DUMPREG(DEI_FMD_STATUS_R1);
	DUMPREG(DEI_FMD_STATUS_R2);
	DUMPREG(SC_MP_SC0);
	DUMPREG(SC_MP_SC1);
	DUMPREG(SC_MP_SC2);
	DUMPREG(SC_MP_SC3);
	DUMPREG(SC_MP_SC4);
	DUMPREG(SC_MP_SC5);
	DUMPREG(SC_MP_SC6);
	DUMPREG(SC_MP_SC8);
	DUMPREG(SC_MP_SC9);
	DUMPREG(SC_MP_SC10);
	DUMPREG(SC_MP_SC11);
	DUMPREG(SC_MP_SC12);
	DUMPREG(SC_MP_SC13);
	DUMPREG(SC_MP_SC17);
	DUMPREG(SC_MP_SC18);
	DUMPREG(SC_MP_SC19);
	DUMPREG(SC_MP_SC20);
	DUMPREG(SC_MP_SC21);
	DUMPREG(SC_MP_SC22);
	DUMPREG(SC_MP_SC23);
	DUMPREG(SC_MP_SC24);
	DUMPREG(SC_MP_SC25);
	DUMPREG(CSC_CSC00);
	DUMPREG(CSC_CSC01);
	DUMPREG(CSC_CSC02);
	DUMPREG(CSC_CSC03);
	DUMPREG(CSC_CSC04);
	DUMPREG(CSC_CSC05);
#undef DUMPREG
}
955 | ||
/*
 * add_out_dtd() - append an outbound data transfer descriptor for the
 * given VPE output port to the context's VPDMA descriptor list.
 *
 * For the motion-vector port the descriptor points at the driver's own
 * mv buffer; for luma/chroma it points at the dst videobuf plane.
 */
static void add_out_dtd(struct vpe_ctx *ctx, int port)
{
	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
	const struct vpe_port_data *p_data = &port_data[port];
	struct vb2_buffer *vb = ctx->dst_vb;
	struct v4l2_rect *c_rect = &q_data->c_rect;
	struct vpe_fmt *fmt = q_data->fmt;
	const struct vpdma_data_format *vpdma_fmt;
	/* write into the mv buffer that is NOT the current source one */
	int mv_buf_selector = !ctx->src_mv_buf_selector;
	dma_addr_t dma_addr;
	u32 flags = 0;

	if (port == VPE_PORT_MV_OUT) {
		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
	} else {
		/* to incorporate interleaved formats */
		int plane = fmt->coplanar ? p_data->vb_part : 0;

		vpdma_fmt = fmt->vpdma_fmt[plane];
		dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
		if (!dma_addr) {
			vpe_err(ctx->dev,
				"acquiring output buffer(%d) dma_addr failed\n",
				port);
			return;
		}
	}

	if (q_data->flags & Q_DATA_FRAME_1D)
		flags |= VPDMA_DATA_FRAME_1D;
	if (q_data->flags & Q_DATA_MODE_TILED)
		flags |= VPDMA_DATA_MODE_TILED;

	vpdma_add_out_dtd(&ctx->desc_list, c_rect, vpdma_fmt, dma_addr,
		p_data->channel, flags);
}
993 | ||
/*
 * add_in_dtd() - append an inbound data transfer descriptor for the
 * given VPE input port to the context's VPDMA descriptor list.
 *
 * p_data->vb_index selects which of the (up to three) source field
 * buffers feeds this port; the mv port reads the driver's mv buffer.
 */
static void add_in_dtd(struct vpe_ctx *ctx, int port)
{
	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
	const struct vpe_port_data *p_data = &port_data[port];
	struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index];
	struct v4l2_rect *c_rect = &q_data->c_rect;
	struct vpe_fmt *fmt = q_data->fmt;
	const struct vpdma_data_format *vpdma_fmt;
	int mv_buf_selector = ctx->src_mv_buf_selector;
	/* non-zero field means the descriptor carries a bottom field */
	int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM;
	dma_addr_t dma_addr;
	u32 flags = 0;

	if (port == VPE_PORT_MV_IN) {
		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
	} else {
		/* to incorporate interleaved formats */
		int plane = fmt->coplanar ? p_data->vb_part : 0;

		vpdma_fmt = fmt->vpdma_fmt[plane];

		dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
		if (!dma_addr) {
			vpe_err(ctx->dev,
				"acquiring input buffer(%d) dma_addr failed\n",
				port);
			return;
		}
	}

	if (q_data->flags & Q_DATA_FRAME_1D)
		flags |= VPDMA_DATA_FRAME_1D;
	if (q_data->flags & Q_DATA_MODE_TILED)
		flags |= VPDMA_DATA_MODE_TILED;

	vpdma_add_in_dtd(&ctx->desc_list, q_data->width, q_data->height,
		c_rect, vpdma_fmt, dma_addr, p_data->channel, field, flags);
}
1033 | ||
/*
 * Enable the expected IRQ sources: list-0 completion plus the DEI and
 * downsampler error interrupts, and the VPDMA list-complete interrupt.
 */
static void enable_irqs(struct vpe_ctx *ctx)
{
	write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
				VPE_DS1_UV_ERROR_INT);

	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
}
1045 | ||
/*
 * disable_irqs() - mask every VPE interrupt source and the VPDMA
 * list-0 completion interrupt (counterpart of enable_irqs()).
 */
static void disable_irqs(struct vpe_ctx *ctx)
{
	write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);

	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
}
1053 | ||
/* device_run() - prepares and starts the device
 *
 * This function is only called when both the source and destination
 * buffers are in place.
 *
 * Descriptor ordering below (MMRs, then output DTDs, then input DTDs,
 * then the sync-on-channel control descriptors) matches what the VPDMA
 * list engine expects — do not reorder.
 */
static void device_run(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];

	/*
	 * first run of a deinterlacing transaction: pull two extra source
	 * buffers so the DEI has the current plus two previous fields
	 * (job_ready() guarantees they are available)
	 */
	if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
		ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		WARN_ON(ctx->src_vbs[2] == NULL);
		ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		WARN_ON(ctx->src_vbs[1] == NULL);
	}

	ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	WARN_ON(ctx->src_vbs[0] == NULL);
	ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	WARN_ON(ctx->dst_vb == NULL);

	/*
	 * config descriptors: only reload the shadow MMRs if another
	 * context ran last, or this context's registers changed
	 */
	if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
		vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
		ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
		ctx->load_mmrs = false;
	}

	/* output data descriptors */
	if (ctx->deinterlacing)
		add_out_dtd(ctx, VPE_PORT_MV_OUT);

	add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
	if (d_q_data->fmt->coplanar)
		add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);

	/* input data descriptors */
	if (ctx->deinterlacing) {
		add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
		add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);

		add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
		add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
	}

	add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
	add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);

	if (ctx->deinterlacing)
		add_in_dtd(ctx, VPE_PORT_MV_IN);

	/* sync on channel control descriptors for input ports */
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);

	if (ctx->deinterlacing) {
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA2_IN);
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_CHROMA2_IN);

		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA3_IN);
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_CHROMA3_IN);

		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
	}

	/* sync on channel control descriptors for output ports */
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT);
	if (d_q_data->fmt->coplanar)
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT);

	if (ctx->deinterlacing)
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);

	enable_irqs(ctx);

	/* hand the assembled list to the VPDMA engine */
	vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
	vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
}
1138 | ||
585e6f01 AT |
/* Log a deinterlacer (DEI) error interrupt; recovery is not attempted. */
static void dei_error(struct vpe_ctx *ctx)
{
	dev_warn(ctx->dev->v4l2_dev.dev,
		"received DEI error interrupt\n");
}
1144 | ||
45719127 AT |
/* Log a chroma downsampler error interrupt; recovery is not attempted. */
static void ds1_uv_error(struct vpe_ctx *ctx)
{
	dev_warn(ctx->dev->v4l2_dev.dev,
		"received downsampler error interrupt\n");
}
1150 | ||
/*
 * vpe_irq() - interrupt handler for VPE.
 *
 * Acknowledges and dispatches error interrupts, then completes the
 * current list: unmaps descriptor buffers, rotates the motion-vector
 * buffers, finishes the src/dst videobufs, and either re-runs the
 * device for the next buffer of the job or finishes the transaction.
 */
static irqreturn_t vpe_irq(int irq_vpe, void *data)
{
	struct vpe_dev *dev = (struct vpe_dev *)data;
	struct vpe_ctx *ctx;
	struct vpe_q_data *d_q_data;
	struct vb2_buffer *s_vb, *d_vb;
	struct v4l2_buffer *s_buf, *d_buf;
	unsigned long flags;
	u32 irqst0, irqst1;

	/* read and acknowledge both interrupt status registers */
	irqst0 = read_reg(dev, VPE_INT0_STATUS0);
	if (irqst0) {
		write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
		vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
	}

	irqst1 = read_reg(dev, VPE_INT0_STATUS1);
	if (irqst1) {
		write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
		vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
	}

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx) {
		vpe_err(dev, "instance released before end of transaction\n");
		goto handled;
	}

	/* handle (and strip) the known error sources */
	if (irqst1) {
		if (irqst1 & VPE_DEI_ERROR_INT) {
			irqst1 &= ~VPE_DEI_ERROR_INT;
			dei_error(ctx);
		}
		if (irqst1 & VPE_DS1_UV_ERROR_INT) {
			irqst1 &= ~VPE_DS1_UV_ERROR_INT;
			ds1_uv_error(ctx);
		}
	}

	if (irqst0) {
		if (irqst0 & VPE_INT0_LIST0_COMPLETE)
			vpdma_clear_list_stat(ctx->dev->vpdma);

		irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
	}

	/* anything still set was not expected */
	if (irqst0 | irqst1) {
		dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
			"INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
			irqst0, irqst1);
	}

	disable_irqs(ctx);

	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);

	vpdma_reset_desc_list(&ctx->desc_list);

	/* the previous dst mv buffer becomes the next src mv buffer */
	ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;

	if (ctx->aborting)
		goto finished;

	s_vb = ctx->src_vbs[0];
	d_vb = ctx->dst_vb;
	s_buf = &s_vb->v4l2_buf;
	d_buf = &d_vb->v4l2_buf;

	/* propagate timestamp/timecode from source to destination */
	d_buf->timestamp = s_buf->timestamp;
	if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
		d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
		d_buf->timecode = s_buf->timecode;
	}
	d_buf->sequence = ctx->sequence;
	d_buf->field = ctx->field;

	/*
	 * interlaced output alternates top/bottom fields; the sequence
	 * number only advances once per full frame
	 */
	d_q_data = &ctx->q_data[Q_DATA_DST];
	if (d_q_data->flags & Q_DATA_INTERLACED) {
		if (ctx->field == V4L2_FIELD_BOTTOM) {
			ctx->sequence++;
			ctx->field = V4L2_FIELD_TOP;
		} else {
			WARN_ON(ctx->field != V4L2_FIELD_TOP);
			ctx->field = V4L2_FIELD_BOTTOM;
		}
	} else {
		ctx->sequence++;
	}

	/* when deinterlacing, the oldest field buffer is the one returned */
	if (ctx->deinterlacing)
		s_vb = ctx->src_vbs[2];

	spin_lock_irqsave(&dev->lock, flags);
	v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* shift the field history: current becomes previous */
	if (ctx->deinterlacing) {
		ctx->src_vbs[2] = ctx->src_vbs[1];
		ctx->src_vbs[1] = ctx->src_vbs[0];
	}

	/* keep running until the whole job (bufs_per_job) is processed */
	ctx->bufs_completed++;
	if (ctx->bufs_completed < ctx->bufs_per_job) {
		device_run(ctx);
		goto handled;
	}

finished:
	vpe_dbg(ctx->dev, "finishing transaction\n");
	ctx->bufs_completed = 0;
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
handled:
	return IRQ_HANDLED;
}
1268 | ||
1269 | /* | |
1270 | * video ioctls | |
1271 | */ | |
1272 | static int vpe_querycap(struct file *file, void *priv, | |
1273 | struct v4l2_capability *cap) | |
1274 | { | |
1275 | strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1); | |
1276 | strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1); | |
1277 | strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info)); | |
1278 | cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; | |
1279 | cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; | |
1280 | return 0; | |
1281 | } | |
1282 | ||
1283 | static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type) | |
1284 | { | |
1285 | int i, index; | |
1286 | struct vpe_fmt *fmt = NULL; | |
1287 | ||
1288 | index = 0; | |
1289 | for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) { | |
1290 | if (vpe_formats[i].types & type) { | |
1291 | if (index == f->index) { | |
1292 | fmt = &vpe_formats[i]; | |
1293 | break; | |
1294 | } | |
1295 | index++; | |
1296 | } | |
1297 | } | |
1298 | ||
1299 | if (!fmt) | |
1300 | return -EINVAL; | |
1301 | ||
1302 | strncpy(f->description, fmt->name, sizeof(f->description) - 1); | |
1303 | f->pixelformat = fmt->fourcc; | |
1304 | return 0; | |
1305 | } | |
1306 | ||
1307 | static int vpe_enum_fmt(struct file *file, void *priv, | |
1308 | struct v4l2_fmtdesc *f) | |
1309 | { | |
1310 | if (V4L2_TYPE_IS_OUTPUT(f->type)) | |
1311 | return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT); | |
1312 | ||
1313 | return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE); | |
1314 | } | |
1315 | ||
/*
 * vpe_g_fmt() - g_fmt ioctl: report the currently configured format of
 * the queue addressed by f->type from the cached per-queue state.
 */
static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct vpe_ctx *ctx = file2ctx(file);
	struct vb2_queue *vq;
	struct vpe_q_data *q_data;
	int i;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	q_data = get_q_data(ctx, f->type);

	pix->width = q_data->width;
	pix->height = q_data->height;
	pix->pixelformat = q_data->fmt->fourcc;
	pix->field = q_data->field;

	if (V4L2_TYPE_IS_OUTPUT(f->type)) {
		pix->colorspace = q_data->colorspace;
	} else {
		struct vpe_q_data *s_q_data;

		/* get colorspace from the source queue */
		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

		pix->colorspace = s_q_data->colorspace;
	}

	/* coplanar formats expose two planes (Y, then UV) */
	pix->num_planes = q_data->fmt->coplanar ? 2 : 1;

	for (i = 0; i < pix->num_planes; i++) {
		pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
		pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
	}

	return 0;
}
1355 | ||
/*
 * __vpe_try_fmt() - validate and constrain a requested format:
 * checks the fourcc against the table, forces a supported field order,
 * aligns width/height so VPDMA line strides stay 16-byte aligned, and
 * fills in per-plane bytesperline/sizeimage.
 */
static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
		       struct vpe_fmt *fmt, int type)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	unsigned int w_align;
	int i, depth, depth_bytes;

	if (!fmt || !(fmt->types & type)) {
		vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
			pix->pixelformat);
		return -EINVAL;
	}

	/* only progressive or alternating-field input is supported */
	if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
		pix->field = V4L2_FIELD_NONE;

	depth = fmt->vpdma_fmt[VPE_LUMA]->depth;

	/*
	 * the line stride should 16 byte aligned for VPDMA to work, based on
	 * the bytes per pixel, figure out how much the width should be aligned
	 * to make sure line stride is 16 byte aligned
	 */
	depth_bytes = depth >> 3;

	if (depth_bytes == 3)
		/*
		 * if bpp is 3(as in some RGB formats), the pixel width doesn't
		 * really help in ensuring line stride is 16 byte aligned
		 */
		w_align = 4;
	else
		/*
		 * for the remainder bpp(4, 2 and 1), the pixel width alignment
		 * can ensure a line stride alignment of 16 bytes. For example,
		 * if bpp is 2, then the line stride can be 16 byte aligned if
		 * the width is 8 byte aligned
		 */
		w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);

	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
			      &pix->height, MIN_H, MAX_H, H_ALIGN,
			      S_ALIGN);

	pix->num_planes = fmt->coplanar ? 2 : 1;
	pix->pixelformat = fmt->fourcc;

	if (type == VPE_FMT_TYPE_CAPTURE) {
		struct vpe_q_data *s_q_data;

		/* get colorspace from the source queue */
		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

		pix->colorspace = s_q_data->colorspace;
	} else {
		if (!pix->colorspace)
			pix->colorspace = V4L2_COLORSPACE_SMPTE240M;
	}

	for (i = 0; i < pix->num_planes; i++) {
		plane_fmt = &pix->plane_fmt[i];
		depth = fmt->vpdma_fmt[i]->depth;

		/* chroma plane of coplanar formats packs UV at width bytes */
		if (i == VPE_LUMA)
			plane_fmt->bytesperline = (pix->width * depth) >> 3;
		else
			plane_fmt->bytesperline = pix->width;

		plane_fmt->sizeimage =
				(pix->height * pix->width * depth) >> 3;
	}

	return 0;
}
1431 | ||
1432 | static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f) | |
1433 | { | |
1434 | struct vpe_ctx *ctx = file2ctx(file); | |
1435 | struct vpe_fmt *fmt = find_format(f); | |
1436 | ||
1437 | if (V4L2_TYPE_IS_OUTPUT(f->type)) | |
1438 | return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT); | |
1439 | else | |
1440 | return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE); | |
1441 | } | |
1442 | ||
/*
 * __vpe_s_fmt() - commit an already-validated format into the per-queue
 * state. Fails with -EBUSY if the queue has buffers allocated. Also
 * resets the crop rectangle to the full frame and updates the
 * interlaced flag from the chosen field order.
 */
static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	struct vpe_q_data *q_data;
	struct vb2_queue *vq;
	int i;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	/* format cannot change while buffers are allocated */
	if (vb2_is_busy(vq)) {
		vpe_err(ctx->dev, "queue busy\n");
		return -EBUSY;
	}

	q_data = get_q_data(ctx, f->type);
	if (!q_data)
		return -EINVAL;

	q_data->fmt = find_format(f);
	q_data->width = pix->width;
	q_data->height = pix->height;
	q_data->colorspace = pix->colorspace;
	q_data->field = pix->field;

	for (i = 0; i < pix->num_planes; i++) {
		plane_fmt = &pix->plane_fmt[i];

		q_data->bytesperline[i] = plane_fmt->bytesperline;
		q_data->sizeimage[i] = plane_fmt->sizeimage;
	}

	/* default crop: the whole frame */
	q_data->c_rect.left = 0;
	q_data->c_rect.top = 0;
	q_data->c_rect.width = q_data->width;
	q_data->c_rect.height = q_data->height;

	if (q_data->field == V4L2_FIELD_ALTERNATE)
		q_data->flags |= Q_DATA_INTERLACED;
	else
		q_data->flags &= ~Q_DATA_INTERLACED;

	vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
		f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
		q_data->bytesperline[VPE_LUMA]);
	if (q_data->fmt->coplanar)
		vpe_dbg(ctx->dev, " bpl_uv %d\n",
			q_data->bytesperline[VPE_CHROMA]);

	return 0;
}
1496 | ||
1497 | static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |
1498 | { | |
1499 | int ret; | |
1500 | struct vpe_ctx *ctx = file2ctx(file); | |
1501 | ||
1502 | ret = vpe_try_fmt(file, priv, f); | |
1503 | if (ret) | |
1504 | return ret; | |
1505 | ||
1506 | ret = __vpe_s_fmt(ctx, f); | |
1507 | if (ret) | |
1508 | return ret; | |
1509 | ||
1510 | if (V4L2_TYPE_IS_OUTPUT(f->type)) | |
1511 | set_src_registers(ctx); | |
1512 | else | |
1513 | set_dst_registers(ctx); | |
1514 | ||
1515 | return set_srcdst_params(ctx); | |
1516 | } | |
1517 | ||
/* reqbufs ioctl: delegate buffer allocation to the m2m framework */
static int vpe_reqbufs(struct file *file, void *priv,
		       struct v4l2_requestbuffers *reqbufs)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
1525 | ||
/* querybuf ioctl: delegate to the m2m framework */
static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
1532 | ||
/* qbuf ioctl: delegate to the m2m framework */
static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
1539 | ||
/* dqbuf ioctl: delegate to the m2m framework */
static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
1546 | ||
/* streamon ioctl: delegate to the m2m framework */
static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
1553 | ||
/*
 * streamoff ioctl: dump the VPE and VPDMA registers for debugging,
 * then delegate to the m2m framework.
 */
static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
{
	struct vpe_ctx *ctx = file2ctx(file);

	vpe_dump_regs(ctx->dev);
	vpdma_dump_regs(ctx->dev->vpdma);

	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
1563 | ||
/*
 * defines number of buffers/frames a context can process with VPE before
 * switching to a different context. default value is 1 buffer per context
 */
#define V4L2_CID_VPE_BUFS_PER_JOB		(V4L2_CID_USER_TI_VPE_BASE + 0)

/*
 * vpe_s_ctrl() - control-handler hook: store the per-context
 * bufs-per-job value set through the custom control.
 */
static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vpe_ctx *ctx =
		container_of(ctrl->handler, struct vpe_ctx, hdl);

	switch (ctrl->id) {
	case V4L2_CID_VPE_BUFS_PER_JOB:
		ctx->bufs_per_job = ctrl->val;
		break;

	default:
		vpe_err(ctx->dev, "Invalid control\n");
		return -EINVAL;
	}

	return 0;
}
1587 | ||
/* control ops: only the set-control path is needed for this driver */
static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
	.s_ctrl = vpe_s_ctrl,
};
1591 | ||
/* ioctl dispatch table; capture and output share the fmt handlers */
static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
	.vidioc_querycap	= vpe_querycap,

	.vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane	= vpe_s_fmt,

	.vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
	.vidioc_g_fmt_vid_out_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_out_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_out_mplane	= vpe_s_fmt,

	.vidioc_reqbufs		= vpe_reqbufs,
	.vidioc_querybuf	= vpe_querybuf,

	.vidioc_qbuf		= vpe_qbuf,
	.vidioc_dqbuf		= vpe_dqbuf,

	.vidioc_streamon	= vpe_streamon,
	.vidioc_streamoff	= vpe_streamoff,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
1616 | ||
/*
 * Queue operations
 */

/*
 * vpe_queue_setup() - vb2 hook: report plane count, per-plane sizes
 * and allocation contexts from the cached queue format.
 */
static int vpe_queue_setup(struct vb2_queue *vq,
			   const struct v4l2_format *fmt,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], void *alloc_ctxs[])
{
	int i;
	struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
	struct vpe_q_data *q_data;

	q_data = get_q_data(ctx, vq->type);

	*nplanes = q_data->fmt->coplanar ? 2 : 1;

	for (i = 0; i < *nplanes; i++) {
		sizes[i] = q_data->sizeimage[i];
		alloc_ctxs[i] = ctx->dev->alloc_ctx;
	}

	vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
		sizes[VPE_LUMA]);
	if (q_data->fmt->coplanar)
		vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);

	return 0;
}
1645 | ||
/*
 * vpe_buf_prepare() - vb2 hook: verify every plane is large enough for
 * the configured format, then set the payload sizes.
 */
static int vpe_buf_prepare(struct vb2_buffer *vb)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct vpe_q_data *q_data;
	int i, num_planes;

	vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);

	q_data = get_q_data(ctx, vb->vb2_queue->type);
	num_planes = q_data->fmt->coplanar ? 2 : 1;

	/* reject buffers whose planes are smaller than sizeimage */
	for (i = 0; i < num_planes; i++) {
		if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
			vpe_err(ctx->dev,
				"data will not fit into plane (%lu < %lu)\n",
				vb2_plane_size(vb, i),
				(long) q_data->sizeimage[i]);
			return -EINVAL;
		}
	}

	for (i = 0; i < num_planes; i++)
		vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);

	return 0;
}
1672 | ||
/* vb2 hook: hand the prepared buffer to the m2m ready queue */
static void vpe_buf_queue(struct vb2_buffer *vb)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
1678 | ||
/* vb2 hook: drop the device lock before sleeping in a dqbuf wait */
static void vpe_wait_prepare(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);
	vpe_unlock(ctx);
}
1684 | ||
/* vb2 hook: re-take the device lock after a dqbuf wait */
static void vpe_wait_finish(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);
	vpe_lock(ctx);
}
1690 | ||
/* vb2 queue ops shared by the source and destination queues */
static struct vb2_ops vpe_qops = {
	.queue_setup	 = vpe_queue_setup,
	.buf_prepare	 = vpe_buf_prepare,
	.buf_queue	 = vpe_buf_queue,
	.wait_prepare	 = vpe_wait_prepare,
	.wait_finish	 = vpe_wait_finish,
};
1698 | ||
1699 | static int queue_init(void *priv, struct vb2_queue *src_vq, | |
1700 | struct vb2_queue *dst_vq) | |
1701 | { | |
1702 | struct vpe_ctx *ctx = priv; | |
1703 | int ret; | |
1704 | ||
1705 | memset(src_vq, 0, sizeof(*src_vq)); | |
1706 | src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
1707 | src_vq->io_modes = VB2_MMAP; | |
1708 | src_vq->drv_priv = ctx; | |
1709 | src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); | |
1710 | src_vq->ops = &vpe_qops; | |
1711 | src_vq->mem_ops = &vb2_dma_contig_memops; | |
1712 | src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; | |
1713 | ||
1714 | ret = vb2_queue_init(src_vq); | |
1715 | if (ret) | |
1716 | return ret; | |
1717 | ||
1718 | memset(dst_vq, 0, sizeof(*dst_vq)); | |
1719 | dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
1720 | dst_vq->io_modes = VB2_MMAP; | |
1721 | dst_vq->drv_priv = ctx; | |
1722 | dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); | |
1723 | dst_vq->ops = &vpe_qops; | |
1724 | dst_vq->mem_ops = &vb2_dma_contig_memops; | |
1725 | dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; | |
1726 | ||
1727 | return vb2_queue_init(dst_vq); | |
1728 | } | |
1729 | ||
/* custom control: how many buffers one context processes per job */
static const struct v4l2_ctrl_config vpe_bufs_per_job = {
	.ops = &vpe_ctrl_ops,
	.id = V4L2_CID_VPE_BUFS_PER_JOB,
	.name = "Buffers Per Transaction",
	.type = V4L2_CTRL_TYPE_INTEGER,
	.def = VPE_DEF_BUFS_PER_JOB,
	.min = 1,
	.max = VIDEO_MAX_FRAME,
	.step = 1,
};
1740 | ||
/*
 * File operations
 */

/*
 * vpe_open() - allocate and initialize a per-open context: descriptor
 * list, shadow-register buffer, control handler, default 1080p NV12-ish
 * formats, and the m2m context. Uses goto-based cleanup; each label
 * unwinds exactly what was acquired before the failure.
 */
static int vpe_open(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = NULL;
	struct vpe_q_data *s_q_data;
	struct v4l2_ctrl_handler *hdl;
	int ret;

	vpe_dbg(dev, "vpe_open\n");

	/* nothing works until the VPDMA firmware has been loaded */
	if (!dev->vpdma->ready) {
		vpe_err(dev, "vpdma firmware not loaded\n");
		return -ENODEV;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;

	if (mutex_lock_interruptible(&dev->dev_mutex)) {
		ret = -ERESTARTSYS;
		goto free_ctx;
	}

	ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
			VPDMA_LIST_TYPE_NORMAL);
	if (ret != 0)
		goto unlock;

	ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
	if (ret != 0)
		goto free_desc_list;

	init_adb_hdrs(ctx);

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;

	hdl = &ctx->hdl;
	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
	if (hdl->error) {
		ret = hdl->error;
		goto exit_fh;
	}
	ctx->fh.ctrl_handler = hdl;
	v4l2_ctrl_handler_setup(hdl);

	/* default source format: 1080p, third entry of the format table */
	s_q_data = &ctx->q_data[Q_DATA_SRC];
	s_q_data->fmt = &vpe_formats[2];
	s_q_data->width = 1920;
	s_q_data->height = 1080;
	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
	s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M;
	s_q_data->field = V4L2_FIELD_NONE;
	s_q_data->c_rect.left = 0;
	s_q_data->c_rect.top = 0;
	s_q_data->c_rect.width = s_q_data->width;
	s_q_data->c_rect.height = s_q_data->height;
	s_q_data->flags = 0;

	/* destination starts identical to the source */
	ctx->q_data[Q_DATA_DST] = *s_q_data;

	set_dei_shadow_registers(ctx);
	set_src_registers(ctx);
	set_dst_registers(ctx);
	ret = set_srcdst_params(ctx);
	if (ret)
		goto exit_fh;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);

	if (IS_ERR(ctx->m2m_ctx)) {
		ret = PTR_ERR(ctx->m2m_ctx);
		goto exit_fh;
	}

	v4l2_fh_add(&ctx->fh);

	/*
	 * for now, just report the creation of the first instance, we can later
	 * optimize the driver to enable or disable clocks when the first
	 * instance is created or the last instance released
	 */
	if (atomic_inc_return(&dev->num_instances) == 1)
		vpe_dbg(dev, "first instance created\n");

	ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;

	ctx->load_mmrs = true;

	vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
		ctx, ctx->m2m_ctx);

	mutex_unlock(&dev->dev_mutex);

	return 0;
exit_fh:
	v4l2_ctrl_handler_free(hdl);
	v4l2_fh_exit(&ctx->fh);
	vpdma_free_desc_buf(&ctx->mmr_adb);
free_desc_list:
	vpdma_free_desc_list(&ctx->desc_list);
unlock:
	mutex_unlock(&dev->dev_mutex);
free_ctx:
	kfree(ctx);
	return ret;
}
1856 | ||
/*
 * Release a VPE context: free interlaced-field and motion-vector
 * buffers, the VPDMA descriptor list and shadow-register buffer, then
 * tear down the v4l2 file handle, control handler and mem2mem context
 * before freeing the context itself. The teardown order mirrors the
 * reverse of the setup order in vpe_open().
 */
static int vpe_release(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = file2ctx(file);

	vpe_dbg(dev, "releasing instance %p\n", ctx);

	/* serialize against all other file operations on this device */
	mutex_lock(&dev->dev_mutex);
	free_vbs(ctx);
	free_mv_buffers(ctx);
	vpdma_free_desc_list(&ctx->desc_list);
	vpdma_free_desc_buf(&ctx->mmr_adb);

	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->hdl);
	v4l2_m2m_ctx_release(ctx->m2m_ctx);

	kfree(ctx);

	/*
	 * for now, just report the release of the last instance, we can later
	 * optimize the driver to enable or disable clocks when the first
	 * instance is created or the last instance released
	 */
	if (atomic_dec_return(&dev->num_instances) == 0)
		vpe_dbg(dev, "last instance released\n");

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
1889 | ||
1890 | static unsigned int vpe_poll(struct file *file, | |
1891 | struct poll_table_struct *wait) | |
1892 | { | |
1893 | struct vpe_ctx *ctx = file2ctx(file); | |
1894 | struct vpe_dev *dev = ctx->dev; | |
1895 | int ret; | |
1896 | ||
1897 | mutex_lock(&dev->dev_mutex); | |
1898 | ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait); | |
1899 | mutex_unlock(&dev->dev_mutex); | |
1900 | return ret; | |
1901 | } | |
1902 | ||
1903 | static int vpe_mmap(struct file *file, struct vm_area_struct *vma) | |
1904 | { | |
1905 | struct vpe_ctx *ctx = file2ctx(file); | |
1906 | struct vpe_dev *dev = ctx->dev; | |
1907 | int ret; | |
1908 | ||
1909 | if (mutex_lock_interruptible(&dev->dev_mutex)) | |
1910 | return -ERESTARTSYS; | |
1911 | ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); | |
1912 | mutex_unlock(&dev->dev_mutex); | |
1913 | return ret; | |
1914 | } | |
1915 | ||
/* file operations for the /dev/videoX node exposed by this driver */
static const struct v4l2_file_operations vpe_fops = {
	.owner		= THIS_MODULE,
	.open		= vpe_open,
	.release	= vpe_release,
	.poll		= vpe_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= vpe_mmap,
};
1924 | ||
/* template video_device, copied into dev->vfd at probe time */
static struct video_device vpe_videodev = {
	.name		= VPE_MODULE_NAME,
	.fops		= &vpe_fops,
	.ioctl_ops	= &vpe_ioctl_ops,
	.minor		= -1,			/* let the core pick a minor */
	.release	= video_device_release,
	.vfl_dir	= VFL_DIR_M2M,		/* memory-to-memory device */
};
1933 | ||
/* callbacks for the v4l2 mem2mem framework's job scheduler */
static struct v4l2_m2m_ops m2m_ops = {
	.device_run	= device_run,
	.job_ready	= job_ready,
	.job_abort	= job_abort,
	.lock		= vpe_lock,
	.unlock		= vpe_unlock,
};
1941 | ||
1942 | static int vpe_runtime_get(struct platform_device *pdev) | |
1943 | { | |
1944 | int r; | |
1945 | ||
1946 | dev_dbg(&pdev->dev, "vpe_runtime_get\n"); | |
1947 | ||
1948 | r = pm_runtime_get_sync(&pdev->dev); | |
1949 | WARN_ON(r < 0); | |
1950 | return r < 0 ? r : 0; | |
1951 | } | |
1952 | ||
1953 | static void vpe_runtime_put(struct platform_device *pdev) | |
1954 | { | |
1955 | ||
1956 | int r; | |
1957 | ||
1958 | dev_dbg(&pdev->dev, "vpe_runtime_put\n"); | |
1959 | ||
1960 | r = pm_runtime_put_sync(&pdev->dev); | |
1961 | WARN_ON(r < 0 && r != -ENOSYS); | |
1962 | } | |
1963 | ||
1964 | static int vpe_probe(struct platform_device *pdev) | |
1965 | { | |
1966 | struct vpe_dev *dev; | |
1967 | struct video_device *vfd; | |
1968 | struct resource *res; | |
1969 | int ret, irq, func; | |
1970 | ||
1971 | dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); | |
b68231a1 WY |
1972 | if (!dev) |
1973 | return -ENOMEM; | |
45719127 AT |
1974 | |
1975 | spin_lock_init(&dev->lock); | |
1976 | ||
1977 | ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); | |
1978 | if (ret) | |
1979 | return ret; | |
1980 | ||
1981 | atomic_set(&dev->num_instances, 0); | |
1982 | mutex_init(&dev->dev_mutex); | |
1983 | ||
1984 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top"); | |
1985 | /* | |
1986 | * HACK: we get resource info from device tree in the form of a list of | |
1987 | * VPE sub blocks, the driver currently uses only the base of vpe_top | |
1988 | * for register access, the driver should be changed later to access | |
1989 | * registers based on the sub block base addresses | |
1990 | */ | |
1991 | dev->base = devm_ioremap(&pdev->dev, res->start, SZ_32K); | |
b68231a1 WY |
1992 | if (!dev->base) { |
1993 | ret = -ENOMEM; | |
45719127 AT |
1994 | goto v4l2_dev_unreg; |
1995 | } | |
1996 | ||
1997 | irq = platform_get_irq(pdev, 0); | |
1998 | ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME, | |
1999 | dev); | |
2000 | if (ret) | |
2001 | goto v4l2_dev_unreg; | |
2002 | ||
2003 | platform_set_drvdata(pdev, dev); | |
2004 | ||
2005 | dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); | |
2006 | if (IS_ERR(dev->alloc_ctx)) { | |
2007 | vpe_err(dev, "Failed to alloc vb2 context\n"); | |
2008 | ret = PTR_ERR(dev->alloc_ctx); | |
2009 | goto v4l2_dev_unreg; | |
2010 | } | |
2011 | ||
2012 | dev->m2m_dev = v4l2_m2m_init(&m2m_ops); | |
2013 | if (IS_ERR(dev->m2m_dev)) { | |
2014 | vpe_err(dev, "Failed to init mem2mem device\n"); | |
2015 | ret = PTR_ERR(dev->m2m_dev); | |
2016 | goto rel_ctx; | |
2017 | } | |
2018 | ||
2019 | pm_runtime_enable(&pdev->dev); | |
2020 | ||
2021 | ret = vpe_runtime_get(pdev); | |
2022 | if (ret) | |
2023 | goto rel_m2m; | |
2024 | ||
2025 | /* Perform clk enable followed by reset */ | |
2026 | vpe_set_clock_enable(dev, 1); | |
2027 | ||
2028 | vpe_top_reset(dev); | |
2029 | ||
2030 | func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK, | |
2031 | VPE_PID_FUNC_SHIFT); | |
2032 | vpe_dbg(dev, "VPE PID function %x\n", func); | |
2033 | ||
2034 | vpe_top_vpdma_reset(dev); | |
2035 | ||
2036 | dev->vpdma = vpdma_create(pdev); | |
6676cafe WY |
2037 | if (IS_ERR(dev->vpdma)) { |
2038 | ret = PTR_ERR(dev->vpdma); | |
45719127 | 2039 | goto runtime_put; |
6676cafe | 2040 | } |
45719127 AT |
2041 | |
2042 | vfd = &dev->vfd; | |
2043 | *vfd = vpe_videodev; | |
2044 | vfd->lock = &dev->dev_mutex; | |
2045 | vfd->v4l2_dev = &dev->v4l2_dev; | |
2046 | ||
2047 | ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); | |
2048 | if (ret) { | |
2049 | vpe_err(dev, "Failed to register video device\n"); | |
2050 | goto runtime_put; | |
2051 | } | |
2052 | ||
2053 | video_set_drvdata(vfd, dev); | |
2054 | snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name); | |
2055 | dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n", | |
2056 | vfd->num); | |
2057 | ||
2058 | return 0; | |
2059 | ||
2060 | runtime_put: | |
2061 | vpe_runtime_put(pdev); | |
2062 | rel_m2m: | |
2063 | pm_runtime_disable(&pdev->dev); | |
2064 | v4l2_m2m_release(dev->m2m_dev); | |
2065 | rel_ctx: | |
2066 | vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); | |
2067 | v4l2_dev_unreg: | |
2068 | v4l2_device_unregister(&dev->v4l2_dev); | |
2069 | ||
2070 | return ret; | |
2071 | } | |
2072 | ||
2073 | static int vpe_remove(struct platform_device *pdev) | |
2074 | { | |
2075 | struct vpe_dev *dev = | |
2076 | (struct vpe_dev *) platform_get_drvdata(pdev); | |
2077 | ||
2078 | v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME); | |
2079 | ||
2080 | v4l2_m2m_release(dev->m2m_dev); | |
2081 | video_unregister_device(&dev->vfd); | |
2082 | v4l2_device_unregister(&dev->v4l2_dev); | |
2083 | vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); | |
2084 | ||
2085 | vpe_set_clock_enable(dev, 0); | |
2086 | vpe_runtime_put(pdev); | |
2087 | pm_runtime_disable(&pdev->dev); | |
2088 | ||
2089 | return 0; | |
2090 | } | |
2091 | ||
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
	{
		.compatible = "ti,vpe",
	},
	{},
};
/* export the match table so the module autoloads on DT systems */
MODULE_DEVICE_TABLE(of, vpe_of_match);
#else
#define vpe_of_match NULL
#endif
2102 | ||
/* platform driver glue; bound by compatible = "ti,vpe" via vpe_of_match */
static struct platform_driver vpe_pdrv = {
	.probe		= vpe_probe,
	.remove		= vpe_remove,
	.driver		= {
		.name	= VPE_MODULE_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= vpe_of_match,
	},
};
2112 | ||
/* standard module init/exit boilerplate for a platform driver */
module_platform_driver(vpe_pdrv);

MODULE_DESCRIPTION("TI VPE driver");
MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
MODULE_LICENSE("GPL");