Commit | Line | Data |
---|---|---|
45719127 AT |
1 | /* |
2 | * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver | |
3 | * | |
4 | * Copyright (c) 2013 Texas Instruments Inc. | |
5 | * David Griego, <dagriego@biglakesoftware.com> | |
6 | * Dale Farnsworth, <dale@farnsworth.org> | |
7 | * Archit Taneja, <archit@ti.com> | |
8 | * | |
9 | * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. | |
10 | * Pawel Osciak, <pawel@osciak.com> | |
11 | * Marek Szyprowski, <m.szyprowski@samsung.com> | |
12 | * | |
13 | * Based on the virtual v4l2-mem2mem example device | |
14 | * | |
15 | * This program is free software; you can redistribute it and/or modify it | |
16 | * under the terms of the GNU General Public License version 2 as published by | |
17 | * the Free Software Foundation | |
18 | */ | |
19 | ||
20 | #include <linux/delay.h> | |
21 | #include <linux/dma-mapping.h> | |
22 | #include <linux/err.h> | |
23 | #include <linux/fs.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/io.h> | |
26 | #include <linux/ioctl.h> | |
27 | #include <linux/module.h> | |
28 | #include <linux/platform_device.h> | |
29 | #include <linux/pm_runtime.h> | |
30 | #include <linux/sched.h> | |
31 | #include <linux/slab.h> | |
32 | #include <linux/videodev2.h> | |
a51cd8f5 | 33 | #include <linux/log2.h> |
45719127 AT |
34 | |
35 | #include <media/v4l2-common.h> | |
36 | #include <media/v4l2-ctrls.h> | |
37 | #include <media/v4l2-device.h> | |
38 | #include <media/v4l2-event.h> | |
39 | #include <media/v4l2-ioctl.h> | |
40 | #include <media/v4l2-mem2mem.h> | |
41 | #include <media/videobuf2-core.h> | |
42 | #include <media/videobuf2-dma-contig.h> | |
43 | ||
44 | #include "vpdma.h" | |
45 | #include "vpe_regs.h" | |
44687b2e | 46 | #include "sc.h" |
45719127 AT |
47 | |
48 | #define VPE_MODULE_NAME "vpe" | |
49 | ||
50 | /* minimum and maximum frame sizes */ | |
51 | #define MIN_W 128 | |
52 | #define MIN_H 128 | |
53 | #define MAX_W 1920 | |
54 | #define MAX_H 1080 | |
55 | ||
56 | /* required alignments */ | |
57 | #define S_ALIGN 0 /* multiple of 1 */ | |
58 | #define H_ALIGN 1 /* multiple of 2 */ | |
45719127 AT |
59 | |
60 | /* flags that indicate a format can be used for capture/output */ | |
61 | #define VPE_FMT_TYPE_CAPTURE (1 << 0) | |
62 | #define VPE_FMT_TYPE_OUTPUT (1 << 1) | |
63 | ||
64 | /* used as plane indices */ | |
65 | #define VPE_MAX_PLANES 2 | |
66 | #define VPE_LUMA 0 | |
67 | #define VPE_CHROMA 1 | |
68 | ||
69 | /* per m2m context info */ | |
585e6f01 AT |
70 | #define VPE_MAX_SRC_BUFS 3 /* need 3 src fields to de-interlace */ |
71 | ||
45719127 AT |
72 | #define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */ |
73 | ||
74 | /* | |
75 | * each VPE context can need up to 3 config descriptors, 7 input descriptors, | |
76 | * 3 output descriptors, and 10 control descriptors | |
77 | */ | |
78 | #define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \ | |
79 | 13 * VPDMA_CFD_CTD_DESC_SIZE) | |
80 | ||
81 | #define vpe_dbg(vpedev, fmt, arg...) \ | |
82 | dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg) | |
83 | #define vpe_err(vpedev, fmt, arg...) \ | |
84 | dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg) | |
85 | ||
86 | struct vpe_us_coeffs { | |
87 | unsigned short anchor_fid0_c0; | |
88 | unsigned short anchor_fid0_c1; | |
89 | unsigned short anchor_fid0_c2; | |
90 | unsigned short anchor_fid0_c3; | |
91 | unsigned short interp_fid0_c0; | |
92 | unsigned short interp_fid0_c1; | |
93 | unsigned short interp_fid0_c2; | |
94 | unsigned short interp_fid0_c3; | |
95 | unsigned short anchor_fid1_c0; | |
96 | unsigned short anchor_fid1_c1; | |
97 | unsigned short anchor_fid1_c2; | |
98 | unsigned short anchor_fid1_c3; | |
99 | unsigned short interp_fid1_c0; | |
100 | unsigned short interp_fid1_c1; | |
101 | unsigned short interp_fid1_c2; | |
102 | unsigned short interp_fid1_c3; | |
103 | }; | |
104 | ||
105 | /* | |
106 | * Default upsampler coefficients | |
107 | */ | |
108 | static const struct vpe_us_coeffs us_coeffs[] = { | |
109 | { | |
110 | /* Coefficients for progressive input */ | |
111 | 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8, | |
112 | 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8, | |
113 | }, | |
585e6f01 AT |
114 | { |
115 | /* Coefficients for Top Field Interlaced input */ | |
116 | 0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3, | |
117 | /* Coefficients for Bottom Field Interlaced input */ | |
118 | 0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9, | |
119 | }, | |
120 | }; | |
121 | ||
122 | /* | |
123 | * the following registers are for configuring some of the parameters of the | |
124 | * motion and edge detection blocks inside DEI, these generally remain the same, | |
125 | these could be passed later via userspace if someone needs to tweak these. | |
126 | */ | |
127 | struct vpe_dei_regs { | |
128 | unsigned long mdt_spacial_freq_thr_reg; /* VPE_DEI_REG2 */ | |
129 | unsigned long edi_config_reg; /* VPE_DEI_REG3 */ | |
130 | unsigned long edi_lut_reg0; /* VPE_DEI_REG4 */ | |
131 | unsigned long edi_lut_reg1; /* VPE_DEI_REG5 */ | |
132 | unsigned long edi_lut_reg2; /* VPE_DEI_REG6 */ | |
133 | unsigned long edi_lut_reg3; /* VPE_DEI_REG7 */ | |
134 | }; | |
135 | ||
136 | /* | |
137 | * default expert DEI register values, unlikely to be modified. | |
138 | */ | |
139 | static const struct vpe_dei_regs dei_regs = { | |
140 | 0x020C0804u, | |
141 | 0x0118100Fu, | |
142 | 0x08040200u, | |
143 | 0x1010100Cu, | |
144 | 0x10101010u, | |
145 | 0x10101010u, | |
45719127 AT |
146 | }; |
147 | ||
148 | /* | |
149 | * The port_data structure contains per-port data. | |
150 | */ | |
151 | struct vpe_port_data { | |
152 | enum vpdma_channel channel; /* VPDMA channel */ | |
585e6f01 | 153 | u8 vb_index; /* input frame f, f-1, f-2 index */ |
45719127 AT |
154 | u8 vb_part; /* plane index for co-planar formats */ |
155 | }; | |
156 | ||
157 | /* | |
158 | * Define indices into the port_data tables | |
159 | */ | |
160 | #define VPE_PORT_LUMA1_IN 0 | |
161 | #define VPE_PORT_CHROMA1_IN 1 | |
585e6f01 AT |
162 | #define VPE_PORT_LUMA2_IN 2 |
163 | #define VPE_PORT_CHROMA2_IN 3 | |
164 | #define VPE_PORT_LUMA3_IN 4 | |
165 | #define VPE_PORT_CHROMA3_IN 5 | |
166 | #define VPE_PORT_MV_IN 6 | |
167 | #define VPE_PORT_MV_OUT 7 | |
45719127 AT |
168 | #define VPE_PORT_LUMA_OUT 8 |
169 | #define VPE_PORT_CHROMA_OUT 9 | |
170 | #define VPE_PORT_RGB_OUT 10 | |
171 | ||
172 | static const struct vpe_port_data port_data[11] = { | |
173 | [VPE_PORT_LUMA1_IN] = { | |
174 | .channel = VPE_CHAN_LUMA1_IN, | |
585e6f01 | 175 | .vb_index = 0, |
45719127 AT |
176 | .vb_part = VPE_LUMA, |
177 | }, | |
178 | [VPE_PORT_CHROMA1_IN] = { | |
179 | .channel = VPE_CHAN_CHROMA1_IN, | |
585e6f01 AT |
180 | .vb_index = 0, |
181 | .vb_part = VPE_CHROMA, | |
182 | }, | |
183 | [VPE_PORT_LUMA2_IN] = { | |
184 | .channel = VPE_CHAN_LUMA2_IN, | |
185 | .vb_index = 1, | |
186 | .vb_part = VPE_LUMA, | |
187 | }, | |
188 | [VPE_PORT_CHROMA2_IN] = { | |
189 | .channel = VPE_CHAN_CHROMA2_IN, | |
190 | .vb_index = 1, | |
191 | .vb_part = VPE_CHROMA, | |
192 | }, | |
193 | [VPE_PORT_LUMA3_IN] = { | |
194 | .channel = VPE_CHAN_LUMA3_IN, | |
195 | .vb_index = 2, | |
196 | .vb_part = VPE_LUMA, | |
197 | }, | |
198 | [VPE_PORT_CHROMA3_IN] = { | |
199 | .channel = VPE_CHAN_CHROMA3_IN, | |
200 | .vb_index = 2, | |
45719127 AT |
201 | .vb_part = VPE_CHROMA, |
202 | }, | |
585e6f01 AT |
203 | [VPE_PORT_MV_IN] = { |
204 | .channel = VPE_CHAN_MV_IN, | |
205 | }, | |
206 | [VPE_PORT_MV_OUT] = { | |
207 | .channel = VPE_CHAN_MV_OUT, | |
208 | }, | |
45719127 AT |
209 | [VPE_PORT_LUMA_OUT] = { |
210 | .channel = VPE_CHAN_LUMA_OUT, | |
211 | .vb_part = VPE_LUMA, | |
212 | }, | |
213 | [VPE_PORT_CHROMA_OUT] = { | |
214 | .channel = VPE_CHAN_CHROMA_OUT, | |
215 | .vb_part = VPE_CHROMA, | |
216 | }, | |
217 | [VPE_PORT_RGB_OUT] = { | |
218 | .channel = VPE_CHAN_RGB_OUT, | |
219 | .vb_part = VPE_LUMA, | |
220 | }, | |
221 | }; | |
222 | ||
223 | ||
224 | /* driver info for each of the supported video formats */ | |
225 | struct vpe_fmt { | |
226 | char *name; /* human-readable name */ | |
227 | u32 fourcc; /* standard format identifier */ | |
228 | u8 types; /* CAPTURE and/or OUTPUT */ | |
229 | u8 coplanar; /* set for unpacked Luma and Chroma */ | |
230 | /* vpdma format info for each plane */ | |
231 | struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES]; | |
232 | }; | |
233 | ||
234 | static struct vpe_fmt vpe_formats[] = { | |
235 | { | |
236 | .name = "YUV 422 co-planar", | |
237 | .fourcc = V4L2_PIX_FMT_NV16, | |
238 | .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, | |
239 | .coplanar = 1, | |
240 | .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444], | |
241 | &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444], | |
242 | }, | |
243 | }, | |
244 | { | |
245 | .name = "YUV 420 co-planar", | |
246 | .fourcc = V4L2_PIX_FMT_NV12, | |
247 | .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, | |
248 | .coplanar = 1, | |
249 | .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420], | |
250 | &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420], | |
251 | }, | |
252 | }, | |
253 | { | |
254 | .name = "YUYV 422 packed", | |
255 | .fourcc = V4L2_PIX_FMT_YUYV, | |
256 | .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, | |
257 | .coplanar = 0, | |
258 | .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422], | |
259 | }, | |
260 | }, | |
261 | { | |
262 | .name = "UYVY 422 packed", | |
263 | .fourcc = V4L2_PIX_FMT_UYVY, | |
264 | .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, | |
265 | .coplanar = 0, | |
266 | .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422], | |
267 | }, | |
268 | }, | |
269 | }; | |
270 | ||
271 | /* | |
272 | * per-queue, driver-specific private data. | |
273 | * there is one source queue and one destination queue for each m2m context. | |
274 | */ | |
275 | struct vpe_q_data { | |
276 | unsigned int width; /* frame width */ | |
277 | unsigned int height; /* frame height */ | |
278 | unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */ | |
279 | enum v4l2_colorspace colorspace; | |
585e6f01 | 280 | enum v4l2_field field; /* supported field value */ |
45719127 AT |
281 | unsigned int flags; |
282 | unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */ | |
283 | struct v4l2_rect c_rect; /* crop/compose rectangle */ | |
284 | struct vpe_fmt *fmt; /* format info */ | |
285 | }; | |
286 | ||
287 | /* vpe_q_data flag bits */ | |
288 | #define Q_DATA_FRAME_1D (1 << 0) | |
289 | #define Q_DATA_MODE_TILED (1 << 1) | |
585e6f01 | 290 | #define Q_DATA_INTERLACED (1 << 2) |
45719127 AT |
291 | |
292 | enum { | |
293 | Q_DATA_SRC = 0, | |
294 | Q_DATA_DST = 1, | |
295 | }; | |
296 | ||
297 | /* find our format description corresponding to the passed v4l2_format */ | |
298 | static struct vpe_fmt *find_format(struct v4l2_format *f) | |
299 | { | |
300 | struct vpe_fmt *fmt; | |
301 | unsigned int k; | |
302 | ||
303 | for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) { | |
304 | fmt = &vpe_formats[k]; | |
305 | if (fmt->fourcc == f->fmt.pix.pixelformat) | |
306 | return fmt; | |
307 | } | |
308 | ||
309 | return NULL; | |
310 | } | |
311 | ||
312 | /* | |
313 | * there is one vpe_dev structure in the driver, it is shared by | |
314 | * all instances. | |
315 | */ | |
316 | struct vpe_dev { | |
317 | struct v4l2_device v4l2_dev; | |
318 | struct video_device vfd; | |
319 | struct v4l2_m2m_dev *m2m_dev; | |
320 | ||
321 | atomic_t num_instances; /* count of driver instances */ | |
322 | dma_addr_t loaded_mmrs; /* shadow mmrs in device */ | |
323 | struct mutex dev_mutex; | |
324 | spinlock_t lock; | |
325 | ||
326 | int irq; | |
327 | void __iomem *base; | |
44687b2e | 328 | struct resource *res; |
45719127 AT |
329 | |
330 | struct vb2_alloc_ctx *alloc_ctx; | |
331 | struct vpdma_data *vpdma; /* vpdma data handle */ | |
44687b2e | 332 | struct sc_data *sc; /* scaler data handle */ |
45719127 AT |
333 | }; |
334 | ||
335 | /* | |
336 | * There is one vpe_ctx structure for each m2m context. | |
337 | */ | |
338 | struct vpe_ctx { | |
339 | struct v4l2_fh fh; | |
340 | struct vpe_dev *dev; | |
341 | struct v4l2_m2m_ctx *m2m_ctx; | |
342 | struct v4l2_ctrl_handler hdl; | |
343 | ||
585e6f01 | 344 | unsigned int field; /* current field */ |
45719127 AT |
345 | unsigned int sequence; /* current frame/field seq */ |
346 | unsigned int aborting; /* abort after next irq */ | |
347 | ||
348 | unsigned int bufs_per_job; /* input buffers per batch */ | |
349 | unsigned int bufs_completed; /* bufs done in this batch */ | |
350 | ||
351 | struct vpe_q_data q_data[2]; /* src & dst queue data */ | |
585e6f01 | 352 | struct vb2_buffer *src_vbs[VPE_MAX_SRC_BUFS]; |
45719127 AT |
353 | struct vb2_buffer *dst_vb; |
354 | ||
585e6f01 AT |
355 | dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */ |
356 | void *mv_buf[2]; /* virtual addrs of motion vector bufs */ | |
357 | size_t mv_buf_size; /* current motion vector buffer size */ | |
45719127 | 358 | struct vpdma_buf mmr_adb; /* shadow reg addr/data block */ |
773f0657 AT |
359 | struct vpdma_buf sc_coeff_h; /* h coeff buffer */ |
360 | struct vpdma_buf sc_coeff_v; /* v coeff buffer */ | |
45719127 AT |
361 | struct vpdma_desc_list desc_list; /* DMA descriptor list */ |
362 | ||
585e6f01 | 363 | bool deinterlacing; /* using de-interlacer */ |
45719127 | 364 | bool load_mmrs; /* have new shadow reg values */ |
585e6f01 AT |
365 | |
366 | unsigned int src_mv_buf_selector; | |
45719127 AT |
367 | }; |
368 | ||
369 | ||
370 | /* | |
371 | * M2M devices get 2 queues. | |
372 | * Return the queue given the type. | |
373 | */ | |
374 | static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx, | |
375 | enum v4l2_buf_type type) | |
376 | { | |
377 | switch (type) { | |
378 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: | |
379 | return &ctx->q_data[Q_DATA_SRC]; | |
380 | case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: | |
381 | return &ctx->q_data[Q_DATA_DST]; | |
382 | default: | |
383 | BUG(); | |
384 | } | |
385 | return NULL; | |
386 | } | |
387 | ||
388 | static u32 read_reg(struct vpe_dev *dev, int offset) | |
389 | { | |
390 | return ioread32(dev->base + offset); | |
391 | } | |
392 | ||
393 | static void write_reg(struct vpe_dev *dev, int offset, u32 value) | |
394 | { | |
395 | iowrite32(value, dev->base + offset); | |
396 | } | |
397 | ||
398 | /* register field read/write helpers */ | |
399 | static int get_field(u32 value, u32 mask, int shift) | |
400 | { | |
401 | return (value & (mask << shift)) >> shift; | |
402 | } | |
403 | ||
404 | static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift) | |
405 | { | |
406 | return get_field(read_reg(dev, offset), mask, shift); | |
407 | } | |
408 | ||
409 | static void write_field(u32 *valp, u32 field, u32 mask, int shift) | |
410 | { | |
411 | u32 val = *valp; | |
412 | ||
413 | val &= ~(mask << shift); | |
414 | val |= (field & mask) << shift; | |
415 | *valp = val; | |
416 | } | |
417 | ||
418 | static void write_field_reg(struct vpe_dev *dev, int offset, u32 field, | |
419 | u32 mask, int shift) | |
420 | { | |
421 | u32 val = read_reg(dev, offset); | |
422 | ||
423 | write_field(&val, field, mask, shift); | |
424 | ||
425 | write_reg(dev, offset, val); | |
426 | } | |
427 | ||
428 | /* | |
429 | * DMA address/data block for the shadow registers | |
430 | */ | |
431 | struct vpe_mmr_adb { | |
432 | struct vpdma_adb_hdr out_fmt_hdr; | |
433 | u32 out_fmt_reg[1]; | |
434 | u32 out_fmt_pad[3]; | |
435 | struct vpdma_adb_hdr us1_hdr; | |
436 | u32 us1_regs[8]; | |
437 | struct vpdma_adb_hdr us2_hdr; | |
438 | u32 us2_regs[8]; | |
439 | struct vpdma_adb_hdr us3_hdr; | |
440 | u32 us3_regs[8]; | |
441 | struct vpdma_adb_hdr dei_hdr; | |
585e6f01 | 442 | u32 dei_regs[8]; |
bbee8b39 AT |
443 | struct vpdma_adb_hdr sc_hdr0; |
444 | u32 sc_regs0[7]; | |
445 | u32 sc_pad0[1]; | |
446 | struct vpdma_adb_hdr sc_hdr8; | |
447 | u32 sc_regs8[6]; | |
448 | u32 sc_pad8[2]; | |
449 | struct vpdma_adb_hdr sc_hdr17; | |
450 | u32 sc_regs17[9]; | |
451 | u32 sc_pad17[3]; | |
45719127 AT |
452 | struct vpdma_adb_hdr csc_hdr; |
453 | u32 csc_regs[6]; | |
454 | u32 csc_pad[2]; | |
455 | }; | |
456 | ||
44687b2e AT |
457 | #define GET_OFFSET_TOP(ctx, obj, reg) \ |
458 | ((obj)->res->start - ctx->dev->res->start + reg) | |
459 | ||
45719127 AT |
460 | #define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \ |
461 | VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a) | |
462 | /* | |
463 | * Set the headers for all of the address/data block structures. | |
464 | */ | |
465 | static void init_adb_hdrs(struct vpe_ctx *ctx) | |
466 | { | |
467 | VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT); | |
468 | VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0); | |
469 | VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0); | |
470 | VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0); | |
471 | VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE); | |
bbee8b39 | 472 | VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0, |
44687b2e | 473 | GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0)); |
bbee8b39 AT |
474 | VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8, |
475 | GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8)); | |
476 | VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17, | |
477 | GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17)); | |
45719127 AT |
478 | VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00); |
479 | }; | |
480 | ||
585e6f01 AT |
481 | /* |
482 | * Allocate or re-allocate the motion vector DMA buffers | |
483 | * There are two buffers, one for input and one for output. | |
484 | * However, the roles are reversed after each field is processed. | |
485 | * In other words, after each field is processed, the previous | |
486 | * output (dst) MV buffer becomes the new input (src) MV buffer. | |
487 | */ | |
488 | static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size) | |
489 | { | |
490 | struct device *dev = ctx->dev->v4l2_dev.dev; | |
491 | ||
492 | if (ctx->mv_buf_size == size) | |
493 | return 0; | |
494 | ||
495 | if (ctx->mv_buf[0]) | |
496 | dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0], | |
497 | ctx->mv_buf_dma[0]); | |
498 | ||
499 | if (ctx->mv_buf[1]) | |
500 | dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1], | |
501 | ctx->mv_buf_dma[1]); | |
502 | ||
503 | if (size == 0) | |
504 | return 0; | |
505 | ||
506 | ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0], | |
507 | GFP_KERNEL); | |
508 | if (!ctx->mv_buf[0]) { | |
509 | vpe_err(ctx->dev, "failed to allocate motion vector buffer\n"); | |
510 | return -ENOMEM; | |
511 | } | |
512 | ||
513 | ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1], | |
514 | GFP_KERNEL); | |
515 | if (!ctx->mv_buf[1]) { | |
516 | vpe_err(ctx->dev, "failed to allocate motion vector buffer\n"); | |
517 | dma_free_coherent(dev, size, ctx->mv_buf[0], | |
518 | ctx->mv_buf_dma[0]); | |
519 | ||
520 | return -ENOMEM; | |
521 | } | |
522 | ||
523 | ctx->mv_buf_size = size; | |
524 | ctx->src_mv_buf_selector = 0; | |
525 | ||
526 | return 0; | |
527 | } | |
528 | ||
529 | static void free_mv_buffers(struct vpe_ctx *ctx) | |
530 | { | |
531 | realloc_mv_buffers(ctx, 0); | |
532 | } | |
533 | ||
534 | /* | |
535 | * While de-interlacing, we keep the two most recent input buffers | |
536 | * around. This function frees those two buffers when we have | |
537 | * finished processing the current stream. | |
538 | */ | |
539 | static void free_vbs(struct vpe_ctx *ctx) | |
540 | { | |
541 | struct vpe_dev *dev = ctx->dev; | |
542 | unsigned long flags; | |
543 | ||
544 | if (ctx->src_vbs[2] == NULL) | |
545 | return; | |
546 | ||
547 | spin_lock_irqsave(&dev->lock, flags); | |
548 | if (ctx->src_vbs[2]) { | |
549 | v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE); | |
550 | v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE); | |
551 | } | |
552 | spin_unlock_irqrestore(&dev->lock, flags); | |
553 | } | |
554 | ||
45719127 AT |
555 | /* |
556 | * Enable or disable the VPE clocks | |
557 | */ | |
558 | static void vpe_set_clock_enable(struct vpe_dev *dev, bool on) | |
559 | { | |
560 | u32 val = 0; | |
561 | ||
562 | if (on) | |
563 | val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE; | |
564 | write_reg(dev, VPE_CLK_ENABLE, val); | |
565 | } | |
566 | ||
567 | static void vpe_top_reset(struct vpe_dev *dev) | |
568 | { | |
569 | ||
570 | write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK, | |
571 | VPE_DATA_PATH_CLK_RESET_SHIFT); | |
572 | ||
573 | usleep_range(100, 150); | |
574 | ||
575 | write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK, | |
576 | VPE_DATA_PATH_CLK_RESET_SHIFT); | |
577 | } | |
578 | ||
579 | static void vpe_top_vpdma_reset(struct vpe_dev *dev) | |
580 | { | |
581 | write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK, | |
582 | VPE_VPDMA_CLK_RESET_SHIFT); | |
583 | ||
584 | usleep_range(100, 150); | |
585 | ||
586 | write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK, | |
587 | VPE_VPDMA_CLK_RESET_SHIFT); | |
588 | } | |
589 | ||
590 | /* | |
591 | * Load the correct set of upsampler coefficients into the shadow MMRs | |
592 | */ | |
593 | static void set_us_coefficients(struct vpe_ctx *ctx) | |
594 | { | |
595 | struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; | |
585e6f01 | 596 | struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; |
45719127 AT |
597 | u32 *us1_reg = &mmr_adb->us1_regs[0]; |
598 | u32 *us2_reg = &mmr_adb->us2_regs[0]; | |
599 | u32 *us3_reg = &mmr_adb->us3_regs[0]; | |
600 | const unsigned short *cp, *end_cp; | |
601 | ||
602 | cp = &us_coeffs[0].anchor_fid0_c0; | |
603 | ||
585e6f01 AT |
604 | if (s_q_data->flags & Q_DATA_INTERLACED) /* interlaced */ |
605 | cp += sizeof(us_coeffs[0]) / sizeof(*cp); | |
606 | ||
45719127 AT |
607 | end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp); |
608 | ||
609 | while (cp < end_cp) { | |
610 | write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT); | |
611 | write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT); | |
612 | *us2_reg++ = *us1_reg; | |
613 | *us3_reg++ = *us1_reg++; | |
614 | } | |
615 | ctx->load_mmrs = true; | |
616 | } | |
617 | ||
618 | /* | |
619 | * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs. | |
620 | */ | |
621 | static void set_cfg_and_line_modes(struct vpe_ctx *ctx) | |
622 | { | |
623 | struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt; | |
624 | struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; | |
625 | u32 *us1_reg0 = &mmr_adb->us1_regs[0]; | |
626 | u32 *us2_reg0 = &mmr_adb->us2_regs[0]; | |
627 | u32 *us3_reg0 = &mmr_adb->us3_regs[0]; | |
628 | int line_mode = 1; | |
629 | int cfg_mode = 1; | |
630 | ||
631 | /* | |
632 | * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing. | |
633 | * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing. | |
634 | */ | |
635 | ||
636 | if (fmt->fourcc == V4L2_PIX_FMT_NV12) { | |
637 | cfg_mode = 0; | |
638 | line_mode = 0; /* double lines to line buffer */ | |
639 | } | |
640 | ||
641 | write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); | |
642 | write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); | |
643 | write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); | |
644 | ||
645 | /* regs for now */ | |
646 | vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN); | |
585e6f01 AT |
647 | vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN); |
648 | vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN); | |
45719127 AT |
649 | |
650 | /* frame start for input luma */ | |
651 | vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, | |
652 | VPE_CHAN_LUMA1_IN); | |
585e6f01 AT |
653 | vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, |
654 | VPE_CHAN_LUMA2_IN); | |
655 | vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, | |
656 | VPE_CHAN_LUMA3_IN); | |
45719127 AT |
657 | |
658 | /* frame start for input chroma */ | |
659 | vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, | |
660 | VPE_CHAN_CHROMA1_IN); | |
585e6f01 AT |
661 | vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, |
662 | VPE_CHAN_CHROMA2_IN); | |
663 | vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, | |
664 | VPE_CHAN_CHROMA3_IN); | |
665 | ||
666 | /* frame start for MV in client */ | |
667 | vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, | |
668 | VPE_CHAN_MV_IN); | |
45719127 AT |
669 | |
670 | ctx->load_mmrs = true; | |
671 | } | |
672 | ||
673 | /* | |
674 | * Set the shadow registers that are modified when the source | |
675 | * format changes. | |
676 | */ | |
677 | static void set_src_registers(struct vpe_ctx *ctx) | |
678 | { | |
679 | set_us_coefficients(ctx); | |
680 | } | |
681 | ||
682 | /* | |
683 | * Set the shadow registers that are modified when the destination | |
684 | * format changes. | |
685 | */ | |
686 | static void set_dst_registers(struct vpe_ctx *ctx) | |
687 | { | |
688 | struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; | |
689 | struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt; | |
690 | u32 val = 0; | |
691 | ||
692 | /* select RGB path when color space conversion is supported in future */ | |
693 | if (fmt->fourcc == V4L2_PIX_FMT_RGB24) | |
694 | val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER; | |
695 | else if (fmt->fourcc == V4L2_PIX_FMT_NV16) | |
696 | val |= VPE_COLOR_SEPARATE_422; | |
697 | ||
698 | /* The source of CHR_DS is always the scaler, whether it's used or not */ | |
699 | val |= VPE_DS_SRC_DEI_SCALER; | |
700 | ||
701 | if (fmt->fourcc != V4L2_PIX_FMT_NV12) | |
702 | val |= VPE_DS_BYPASS; | |
703 | ||
704 | mmr_adb->out_fmt_reg[0] = val; | |
705 | ||
706 | ctx->load_mmrs = true; | |
707 | } | |
708 | ||
709 | /* | |
710 | * Set the de-interlacer shadow register values | |
711 | */ | |
585e6f01 | 712 | static void set_dei_regs(struct vpe_ctx *ctx) |
45719127 AT |
713 | { |
714 | struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; | |
715 | struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; | |
716 | unsigned int src_h = s_q_data->c_rect.height; | |
717 | unsigned int src_w = s_q_data->c_rect.width; | |
718 | u32 *dei_mmr0 = &mmr_adb->dei_regs[0]; | |
585e6f01 | 719 | bool deinterlace = true; |
45719127 AT |
720 | u32 val = 0; |
721 | ||
722 | /* | |
723 | * according to TRM, we should set DEI in progressive bypass mode when | |
724 | * the input content is progressive, however, DEI is bypassed correctly | |
725 | * for both progressive and interlace content in interlace bypass mode. | |
726 | * It has been recommended not to use progressive bypass mode. | |
727 | */ | |
585e6f01 AT |
728 | if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) || |
729 | !(s_q_data->flags & Q_DATA_INTERLACED)) { | |
730 | deinterlace = false; | |
731 | val = VPE_DEI_INTERLACE_BYPASS; | |
732 | } | |
733 | ||
734 | src_h = deinterlace ? src_h * 2 : src_h; | |
45719127 AT |
735 | |
736 | val |= (src_h << VPE_DEI_HEIGHT_SHIFT) | | |
737 | (src_w << VPE_DEI_WIDTH_SHIFT) | | |
738 | VPE_DEI_FIELD_FLUSH; | |
739 | ||
740 | *dei_mmr0 = val; | |
741 | ||
742 | ctx->load_mmrs = true; | |
743 | } | |
744 | ||
585e6f01 AT |
745 | static void set_dei_shadow_registers(struct vpe_ctx *ctx) |
746 | { | |
747 | struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; | |
748 | u32 *dei_mmr = &mmr_adb->dei_regs[0]; | |
749 | const struct vpe_dei_regs *cur = &dei_regs; | |
750 | ||
751 | dei_mmr[2] = cur->mdt_spacial_freq_thr_reg; | |
752 | dei_mmr[3] = cur->edi_config_reg; | |
753 | dei_mmr[4] = cur->edi_lut_reg0; | |
754 | dei_mmr[5] = cur->edi_lut_reg1; | |
755 | dei_mmr[6] = cur->edi_lut_reg2; | |
756 | dei_mmr[7] = cur->edi_lut_reg3; | |
757 | ||
758 | ctx->load_mmrs = true; | |
759 | } | |
760 | ||
45719127 AT |
761 | static void set_csc_coeff_bypass(struct vpe_ctx *ctx) |
762 | { | |
763 | struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; | |
764 | u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5]; | |
765 | ||
766 | *shadow_csc_reg5 |= VPE_CSC_BYPASS; | |
767 | ||
768 | ctx->load_mmrs = true; | |
769 | } | |
770 | ||
45719127 AT |
771 | /* |
772 | * Set the shadow registers whose values are modified when either the | |
773 | * source or destination format is changed. | |
774 | */ | |
775 | static int set_srcdst_params(struct vpe_ctx *ctx) | |
776 | { | |
585e6f01 AT |
777 | struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; |
778 | struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST]; | |
44687b2e | 779 | struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; |
773f0657 AT |
780 | unsigned int src_w = s_q_data->c_rect.width; |
781 | unsigned int src_h = s_q_data->c_rect.height; | |
782 | unsigned int dst_w = d_q_data->c_rect.width; | |
783 | unsigned int dst_h = d_q_data->c_rect.height; | |
585e6f01 AT |
784 | size_t mv_buf_size; |
785 | int ret; | |
786 | ||
45719127 | 787 | ctx->sequence = 0; |
585e6f01 AT |
788 | ctx->field = V4L2_FIELD_TOP; |
789 | ||
790 | if ((s_q_data->flags & Q_DATA_INTERLACED) && | |
791 | !(d_q_data->flags & Q_DATA_INTERLACED)) { | |
a51cd8f5 | 792 | int bytes_per_line; |
585e6f01 AT |
793 | const struct vpdma_data_format *mv = |
794 | &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; | |
795 | ||
a51cd8f5 AT |
796 | /* |
797 | * we make sure that the source image has a 16 byte aligned | |
798 | * stride, we need to do the same for the motion vector buffer | |
799 | * by aligning its stride to the next 16 byte boundary. this | |
800 | * extra space will not be used by the de-interlacer, but will | |
801 | * ensure that vpdma operates correctly | |
802 | */ | |
803 | bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3, | |
804 | VPDMA_STRIDE_ALIGN); | |
805 | mv_buf_size = bytes_per_line * s_q_data->height; | |
773f0657 AT |
806 | |
807 | ctx->deinterlacing = 1; | |
808 | src_h <<= 1; | |
585e6f01 AT |
809 | } else { |
810 | ctx->deinterlacing = 0; | |
811 | mv_buf_size = 0; | |
812 | } | |
813 | ||
814 | free_vbs(ctx); | |
815 | ||
816 | ret = realloc_mv_buffers(ctx, mv_buf_size); | |
817 | if (ret) | |
818 | return ret; | |
45719127 AT |
819 | |
820 | set_cfg_and_line_modes(ctx); | |
585e6f01 | 821 | set_dei_regs(ctx); |
45719127 | 822 | set_csc_coeff_bypass(ctx); |
bbee8b39 | 823 | |
773f0657 AT |
824 | sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w); |
825 | sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h); | |
bbee8b39 AT |
826 | |
827 | sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0], | |
828 | &mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0], | |
829 | src_w, src_h, dst_w, dst_h); | |
45719127 AT |
830 | |
831 | return 0; | |
832 | } | |
833 | ||
834 | /* | |
835 | * Return the vpe_ctx structure for a given struct file | |
836 | */ | |
837 | static struct vpe_ctx *file2ctx(struct file *file) | |
838 | { | |
839 | return container_of(file->private_data, struct vpe_ctx, fh); | |
840 | } | |
841 | ||
842 | /* | |
843 | * mem2mem callbacks | |
844 | */ | |
845 | ||
846 | /** | |
847 | * job_ready() - check whether an instance is ready to be scheduled to run | |
848 | */ | |
849 | static int job_ready(void *priv) | |
850 | { | |
851 | struct vpe_ctx *ctx = priv; | |
852 | int needed = ctx->bufs_per_job; | |
853 | ||
585e6f01 AT |
854 | if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) |
855 | needed += 2; /* need additional two most recent fields */ | |
856 | ||
45719127 AT |
857 | if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed) |
858 | return 0; | |
859 | ||
860 | return 1; | |
861 | } | |
862 | ||
/* m2m job_abort callback: request asynchronous cancellation of the job. */
static void job_abort(void *priv)
{
	struct vpe_ctx *ctx = priv;

	/* Will cancel the transaction in the next interrupt handler */
	ctx->aborting = 1;
}
870 | ||
871 | /* | |
872 | * Lock access to the device | |
873 | */ | |
static void vpe_lock(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct vpe_dev *dev = ctx->dev;
	/* single device-wide mutex serializes all contexts */
	mutex_lock(&dev->dev_mutex);
}
880 | ||
/* Release the device-wide lock taken by vpe_lock(). */
static void vpe_unlock(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct vpe_dev *dev = ctx->dev;
	mutex_unlock(&dev->dev_mutex);
}
887 | ||
/*
 * Dump all VPE core registers (plus the scaler block's registers) to the
 * debug log. Used from vpe_streamoff() for post-run diagnostics.
 */
static void vpe_dump_regs(struct vpe_dev *dev)
{
#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))

	vpe_dbg(dev, "VPE Registers:\n");

	DUMPREG(PID);
	DUMPREG(SYSCONFIG);
	DUMPREG(INT0_STATUS0_RAW);
	DUMPREG(INT0_STATUS0);
	DUMPREG(INT0_ENABLE0);
	DUMPREG(INT0_STATUS1_RAW);
	DUMPREG(INT0_STATUS1);
	DUMPREG(INT0_ENABLE1);
	DUMPREG(CLK_ENABLE);
	DUMPREG(CLK_RESET);
	DUMPREG(CLK_FORMAT_SELECT);
	DUMPREG(CLK_RANGE_MAP);
	DUMPREG(US1_R0);
	DUMPREG(US1_R1);
	DUMPREG(US1_R2);
	DUMPREG(US1_R3);
	DUMPREG(US1_R4);
	DUMPREG(US1_R5);
	DUMPREG(US1_R6);
	DUMPREG(US1_R7);
	DUMPREG(US2_R0);
	DUMPREG(US2_R1);
	DUMPREG(US2_R2);
	DUMPREG(US2_R3);
	DUMPREG(US2_R4);
	DUMPREG(US2_R5);
	DUMPREG(US2_R6);
	DUMPREG(US2_R7);
	DUMPREG(US3_R0);
	DUMPREG(US3_R1);
	DUMPREG(US3_R2);
	DUMPREG(US3_R3);
	DUMPREG(US3_R4);
	DUMPREG(US3_R5);
	DUMPREG(US3_R6);
	DUMPREG(US3_R7);
	DUMPREG(DEI_FRAME_SIZE);
	DUMPREG(MDT_BYPASS);
	DUMPREG(MDT_SF_THRESHOLD);
	DUMPREG(EDI_CONFIG);
	DUMPREG(DEI_EDI_LUT_R0);
	DUMPREG(DEI_EDI_LUT_R1);
	DUMPREG(DEI_EDI_LUT_R2);
	DUMPREG(DEI_EDI_LUT_R3);
	DUMPREG(DEI_FMD_WINDOW_R0);
	DUMPREG(DEI_FMD_WINDOW_R1);
	DUMPREG(DEI_FMD_CONTROL_R0);
	DUMPREG(DEI_FMD_CONTROL_R1);
	DUMPREG(DEI_FMD_STATUS_R0);
	DUMPREG(DEI_FMD_STATUS_R1);
	DUMPREG(DEI_FMD_STATUS_R2);
	DUMPREG(CSC_CSC00);
	DUMPREG(CSC_CSC01);
	DUMPREG(CSC_CSC02);
	DUMPREG(CSC_CSC03);
	DUMPREG(CSC_CSC04);
	DUMPREG(CSC_CSC05);
#undef DUMPREG

	/* the scaler sub-block has its own dumper */
	sc_dump_regs(dev->sc);
}
955 | ||
/*
 * Append an outbound VPDMA data transfer descriptor for the given output
 * port to the context's descriptor list. For the motion-vector port the
 * destination is the driver-allocated MV buffer (the one NOT used as the
 * current source); for luma/chroma ports it is the dequeued capture buffer.
 */
static void add_out_dtd(struct vpe_ctx *ctx, int port)
{
	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
	const struct vpe_port_data *p_data = &port_data[port];
	struct vb2_buffer *vb = ctx->dst_vb;
	struct v4l2_rect *c_rect = &q_data->c_rect;
	struct vpe_fmt *fmt = q_data->fmt;
	const struct vpdma_data_format *vpdma_fmt;
	/* MV output goes to the buffer opposite the current MV source */
	int mv_buf_selector = !ctx->src_mv_buf_selector;
	dma_addr_t dma_addr;
	u32 flags = 0;

	if (port == VPE_PORT_MV_OUT) {
		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
	} else {
		/* to incorporate interleaved formats */
		int plane = fmt->coplanar ? p_data->vb_part : 0;

		vpdma_fmt = fmt->vpdma_fmt[plane];
		dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
		if (!dma_addr) {
			vpe_err(ctx->dev,
				"acquiring output buffer(%d) dma_addr failed\n",
				port);
			return;
		}
	}

	if (q_data->flags & Q_DATA_FRAME_1D)
		flags |= VPDMA_DATA_FRAME_1D;
	if (q_data->flags & Q_DATA_MODE_TILED)
		flags |= VPDMA_DATA_MODE_TILED;

	vpdma_add_out_dtd(&ctx->desc_list, c_rect, vpdma_fmt, dma_addr,
		p_data->channel, flags);
}
993 | ||
/*
 * Append an inbound VPDMA data transfer descriptor for the given input
 * port. The source is either the current MV buffer (VPE_PORT_MV_IN) or
 * the appropriate plane of one of the queued source video buffers, chosen
 * by the port's vb_index (field history for deinterlacing).
 */
static void add_in_dtd(struct vpe_ctx *ctx, int port)
{
	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
	const struct vpe_port_data *p_data = &port_data[port];
	struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index];
	struct v4l2_rect *c_rect = &q_data->c_rect;
	struct vpe_fmt *fmt = q_data->fmt;
	const struct vpdma_data_format *vpdma_fmt;
	int mv_buf_selector = ctx->src_mv_buf_selector;
	/* 1 when this buffer carries the bottom field, 0 otherwise */
	int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM;
	dma_addr_t dma_addr;
	u32 flags = 0;

	if (port == VPE_PORT_MV_IN) {
		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
	} else {
		/* to incorporate interleaved formats */
		int plane = fmt->coplanar ? p_data->vb_part : 0;

		vpdma_fmt = fmt->vpdma_fmt[plane];

		dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
		if (!dma_addr) {
			vpe_err(ctx->dev,
				"acquiring input buffer(%d) dma_addr failed\n",
				port);
			return;
		}
	}

	if (q_data->flags & Q_DATA_FRAME_1D)
		flags |= VPDMA_DATA_FRAME_1D;
	if (q_data->flags & Q_DATA_MODE_TILED)
		flags |= VPDMA_DATA_MODE_TILED;

	vpdma_add_in_dtd(&ctx->desc_list, q_data->width, q_data->height,
		c_rect, vpdma_fmt, dma_addr, p_data->channel, field, flags);
}
1033 | ||
1034 | /* | |
1035 | * Enable the expected IRQ sources | |
1036 | */ | |
static void enable_irqs(struct vpe_ctx *ctx)
{
	/* list-0 completion on INT0 line 0; DEI/downsampler errors on line 1 */
	write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
				VPE_DS1_UV_ERROR_INT);

	/* also enable the VPDMA-side list-complete interrupt for list 0 */
	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
}
1045 | ||
/* Mask every VPE interrupt source and the VPDMA list-0 completion IRQ. */
static void disable_irqs(struct vpe_ctx *ctx)
{
	write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);

	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
}
1053 | ||
1054 | /* device_run() - prepares and starts the device | |
1055 | * | |
1056 | * This function is only called when both the source and destination | |
1057 | * buffers are in place. | |
1058 | */ | |
static void device_run(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct sc_data *sc = ctx->dev->sc;
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];

	/*
	 * Prime the field history on the first deinterlacing job: pull two
	 * extra source buffers so src_vbs[2]/[1] hold the two previous fields.
	 */
	if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
		ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		WARN_ON(ctx->src_vbs[2] == NULL);
		ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		WARN_ON(ctx->src_vbs[1] == NULL);
	}

	ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	WARN_ON(ctx->src_vbs[0] == NULL);
	ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	WARN_ON(ctx->dst_vb == NULL);

	/* config descriptors */
	/* reload MMR shadow block only if stale or explicitly requested */
	if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
		vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
		ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
		ctx->load_mmrs = false;
	}

	/* same staleness check for the horizontal scaler coefficients */
	if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
			sc->load_coeff_h) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
			&ctx->sc_coeff_h, 0);

		sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
		sc->load_coeff_h = false;
	}

	/* vertical coefficients load at the second half of the coeff SRAM */
	if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
			sc->load_coeff_v) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
			&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);

		sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
		sc->load_coeff_v = false;
	}

	/* output data descriptors */
	if (ctx->deinterlacing)
		add_out_dtd(ctx, VPE_PORT_MV_OUT);

	add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
	if (d_q_data->fmt->coplanar)
		add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);

	/* input data descriptors */
	if (ctx->deinterlacing) {
		add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
		add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);

		add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
		add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
	}

	add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
	add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);

	if (ctx->deinterlacing)
		add_in_dtd(ctx, VPE_PORT_MV_IN);

	/* sync on channel control descriptors for input ports */
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);

	if (ctx->deinterlacing) {
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA2_IN);
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_CHROMA2_IN);

		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA3_IN);
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_CHROMA3_IN);

		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
	}

	/* sync on channel control descriptors for output ports */
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT);
	if (d_q_data->fmt->coplanar)
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT);

	if (ctx->deinterlacing)
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);

	enable_irqs(ctx);

	/* hand the assembled list to VPDMA; completion arrives via vpe_irq */
	vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
	vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
}
1159 | ||
585e6f01 AT |
/* Log-only handler for the deinterlacer error interrupt. */
static void dei_error(struct vpe_ctx *ctx)
{
	dev_warn(ctx->dev->v4l2_dev.dev,
		"received DEI error interrupt\n");
}
45719127 AT |
/* Log-only handler for the DS1 chroma downsampler error interrupt. */
static void ds1_uv_error(struct vpe_ctx *ctx)
{
	dev_warn(ctx->dev->v4l2_dev.dev,
		"received downsampler error interrupt\n");
}
1171 | ||
/*
 * VPE interrupt handler: acknowledges error and list-complete interrupts,
 * finishes the current source/destination buffer pair, and either kicks off
 * the next buffer of the same job (bufs_per_job batching) or signals job
 * completion to the m2m framework.
 */
static irqreturn_t vpe_irq(int irq_vpe, void *data)
{
	struct vpe_dev *dev = (struct vpe_dev *)data;
	struct vpe_ctx *ctx;
	struct vpe_q_data *d_q_data;
	struct vb2_buffer *s_vb, *d_vb;
	struct v4l2_buffer *s_buf, *d_buf;
	unsigned long flags;
	u32 irqst0, irqst1;

	/* read and ack both status lines up front */
	irqst0 = read_reg(dev, VPE_INT0_STATUS0);
	if (irqst0) {
		write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
		vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
	}

	irqst1 = read_reg(dev, VPE_INT0_STATUS1);
	if (irqst1) {
		write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
		vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
	}

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx) {
		vpe_err(dev, "instance released before end of transaction\n");
		goto handled;
	}

	/* dispatch error interrupts, clearing each handled bit */
	if (irqst1) {
		if (irqst1 & VPE_DEI_ERROR_INT) {
			irqst1 &= ~VPE_DEI_ERROR_INT;
			dei_error(ctx);
		}
		if (irqst1 & VPE_DS1_UV_ERROR_INT) {
			irqst1 &= ~VPE_DS1_UV_ERROR_INT;
			ds1_uv_error(ctx);
		}
	}

	if (irqst0) {
		if (irqst0 & VPE_INT0_LIST0_COMPLETE)
			vpdma_clear_list_stat(ctx->dev->vpdma);

		irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
	}

	/* anything still set was not expected */
	if (irqst0 | irqst1) {
		dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
			"INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
			irqst0, irqst1);
	}

	disable_irqs(ctx);

	/* descriptor buffers were DMA-mapped in device_run() */
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);

	vpdma_reset_desc_list(&ctx->desc_list);

	/* the previous dst mv buffer becomes the next src mv buffer */
	ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;

	if (ctx->aborting)
		goto finished;

	s_vb = ctx->src_vbs[0];
	d_vb = ctx->dst_vb;
	s_buf = &s_vb->v4l2_buf;
	d_buf = &d_vb->v4l2_buf;

	/* propagate timestamp/timecode from source to destination */
	d_buf->timestamp = s_buf->timestamp;
	if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
		d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
		d_buf->timecode = s_buf->timecode;
	}
	d_buf->sequence = ctx->sequence;
	d_buf->field = ctx->field;

	/* interlaced capture: alternate top/bottom, bump sequence per pair */
	d_q_data = &ctx->q_data[Q_DATA_DST];
	if (d_q_data->flags & Q_DATA_INTERLACED) {
		if (ctx->field == V4L2_FIELD_BOTTOM) {
			ctx->sequence++;
			ctx->field = V4L2_FIELD_TOP;
		} else {
			WARN_ON(ctx->field != V4L2_FIELD_TOP);
			ctx->field = V4L2_FIELD_BOTTOM;
		}
	} else {
		ctx->sequence++;
	}

	/* when deinterlacing, the oldest field buffer is the one released */
	if (ctx->deinterlacing)
		s_vb = ctx->src_vbs[2];

	spin_lock_irqsave(&dev->lock, flags);
	v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* shift the field history window forward by one */
	if (ctx->deinterlacing) {
		ctx->src_vbs[2] = ctx->src_vbs[1];
		ctx->src_vbs[1] = ctx->src_vbs[0];
	}

	ctx->bufs_completed++;
	if (ctx->bufs_completed < ctx->bufs_per_job) {
		device_run(ctx);
		goto handled;
	}

finished:
	vpe_dbg(ctx->dev, "finishing transaction\n");
	ctx->bufs_completed = 0;
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
handled:
	return IRQ_HANDLED;
}
1291 | ||
1292 | /* | |
1293 | * video ioctls | |
1294 | */ | |
1295 | static int vpe_querycap(struct file *file, void *priv, | |
1296 | struct v4l2_capability *cap) | |
1297 | { | |
1298 | strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1); | |
1299 | strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1); | |
1300 | strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info)); | |
1301 | cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; | |
1302 | cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; | |
1303 | return 0; | |
1304 | } | |
1305 | ||
1306 | static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type) | |
1307 | { | |
1308 | int i, index; | |
1309 | struct vpe_fmt *fmt = NULL; | |
1310 | ||
1311 | index = 0; | |
1312 | for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) { | |
1313 | if (vpe_formats[i].types & type) { | |
1314 | if (index == f->index) { | |
1315 | fmt = &vpe_formats[i]; | |
1316 | break; | |
1317 | } | |
1318 | index++; | |
1319 | } | |
1320 | } | |
1321 | ||
1322 | if (!fmt) | |
1323 | return -EINVAL; | |
1324 | ||
1325 | strncpy(f->description, fmt->name, sizeof(f->description) - 1); | |
1326 | f->pixelformat = fmt->fourcc; | |
1327 | return 0; | |
1328 | } | |
1329 | ||
1330 | static int vpe_enum_fmt(struct file *file, void *priv, | |
1331 | struct v4l2_fmtdesc *f) | |
1332 | { | |
1333 | if (V4L2_TYPE_IS_OUTPUT(f->type)) | |
1334 | return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT); | |
1335 | ||
1336 | return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE); | |
1337 | } | |
1338 | ||
1339 | static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f) | |
1340 | { | |
1341 | struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; | |
1342 | struct vpe_ctx *ctx = file2ctx(file); | |
1343 | struct vb2_queue *vq; | |
1344 | struct vpe_q_data *q_data; | |
1345 | int i; | |
1346 | ||
1347 | vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); | |
1348 | if (!vq) | |
1349 | return -EINVAL; | |
1350 | ||
1351 | q_data = get_q_data(ctx, f->type); | |
1352 | ||
1353 | pix->width = q_data->width; | |
1354 | pix->height = q_data->height; | |
1355 | pix->pixelformat = q_data->fmt->fourcc; | |
585e6f01 | 1356 | pix->field = q_data->field; |
45719127 AT |
1357 | |
1358 | if (V4L2_TYPE_IS_OUTPUT(f->type)) { | |
1359 | pix->colorspace = q_data->colorspace; | |
1360 | } else { | |
1361 | struct vpe_q_data *s_q_data; | |
1362 | ||
1363 | /* get colorspace from the source queue */ | |
1364 | s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); | |
1365 | ||
1366 | pix->colorspace = s_q_data->colorspace; | |
1367 | } | |
1368 | ||
1369 | pix->num_planes = q_data->fmt->coplanar ? 2 : 1; | |
1370 | ||
1371 | for (i = 0; i < pix->num_planes; i++) { | |
1372 | pix->plane_fmt[i].bytesperline = q_data->bytesperline[i]; | |
1373 | pix->plane_fmt[i].sizeimage = q_data->sizeimage[i]; | |
1374 | } | |
1375 | ||
1376 | return 0; | |
1377 | } | |
1378 | ||
/*
 * Validate and adjust a requested format so it is something the hardware
 * can actually do: clamp width/height to the supported range, align the
 * width so every line stride is 16-byte aligned for VPDMA, force a
 * supported field order, and compute per-plane stride/size.
 */
static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
		       struct vpe_fmt *fmt, int type)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	unsigned int w_align;
	int i, depth, depth_bytes;

	if (!fmt || !(fmt->types & type)) {
		vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
			pix->pixelformat);
		return -EINVAL;
	}

	/* only progressive or alternating-field input is supported */
	if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
		pix->field = V4L2_FIELD_NONE;

	depth = fmt->vpdma_fmt[VPE_LUMA]->depth;

	/*
	 * the line stride should 16 byte aligned for VPDMA to work, based on
	 * the bytes per pixel, figure out how much the width should be aligned
	 * to make sure line stride is 16 byte aligned
	 */
	depth_bytes = depth >> 3;

	if (depth_bytes == 3)
		/*
		 * if bpp is 3(as in some RGB formats), the pixel width doesn't
		 * really help in ensuring line stride is 16 byte aligned
		 */
		w_align = 4;
	else
		/*
		 * for the remainder bpp(4, 2 and 1), the pixel width alignment
		 * can ensure a line stride alignment of 16 bytes. For example,
		 * if bpp is 2, then the line stride can be 16 byte aligned if
		 * the width is 8 byte aligned
		 */
		w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);

	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
			      &pix->height, MIN_H, MAX_H, H_ALIGN,
			      S_ALIGN);

	pix->num_planes = fmt->coplanar ? 2 : 1;
	pix->pixelformat = fmt->fourcc;

	if (type == VPE_FMT_TYPE_CAPTURE) {
		struct vpe_q_data *s_q_data;

		/* get colorspace from the source queue */
		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

		pix->colorspace = s_q_data->colorspace;
	} else {
		if (!pix->colorspace)
			pix->colorspace = V4L2_COLORSPACE_SMPTE240M;
	}

	/* derive per-plane line stride and total image size from depth */
	for (i = 0; i < pix->num_planes; i++) {
		plane_fmt = &pix->plane_fmt[i];
		depth = fmt->vpdma_fmt[i]->depth;

		if (i == VPE_LUMA)
			plane_fmt->bytesperline = (pix->width * depth) >> 3;
		else
			plane_fmt->bytesperline = pix->width;

		plane_fmt->sizeimage =
				(pix->height * pix->width * depth) >> 3;
	}

	return 0;
}
1454 | ||
1455 | static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f) | |
1456 | { | |
1457 | struct vpe_ctx *ctx = file2ctx(file); | |
1458 | struct vpe_fmt *fmt = find_format(f); | |
1459 | ||
1460 | if (V4L2_TYPE_IS_OUTPUT(f->type)) | |
1461 | return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT); | |
1462 | else | |
1463 | return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE); | |
1464 | } | |
1465 | ||
/*
 * Commit an (already validated) format into the queue state: store the
 * format, geometry, per-plane stride/size, reset the crop rectangle to
 * full frame, and track whether the queue is interlaced.
 */
static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	struct vpe_q_data *q_data;
	struct vb2_queue *vq;
	int i;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	/* format cannot change while buffers are allocated */
	if (vb2_is_busy(vq)) {
		vpe_err(ctx->dev, "queue busy\n");
		return -EBUSY;
	}

	q_data = get_q_data(ctx, f->type);
	if (!q_data)
		return -EINVAL;

	q_data->fmt		= find_format(f);
	q_data->width		= pix->width;
	q_data->height		= pix->height;
	q_data->colorspace	= pix->colorspace;
	q_data->field		= pix->field;

	for (i = 0; i < pix->num_planes; i++) {
		plane_fmt = &pix->plane_fmt[i];

		q_data->bytesperline[i]	= plane_fmt->bytesperline;
		q_data->sizeimage[i]	= plane_fmt->sizeimage;
	}

	/* default crop rectangle is the full frame */
	q_data->c_rect.left	= 0;
	q_data->c_rect.top	= 0;
	q_data->c_rect.width	= q_data->width;
	q_data->c_rect.height	= q_data->height;

	if (q_data->field == V4L2_FIELD_ALTERNATE)
		q_data->flags |= Q_DATA_INTERLACED;
	else
		q_data->flags &= ~Q_DATA_INTERLACED;

	vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
		f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
		q_data->bytesperline[VPE_LUMA]);
	if (q_data->fmt->coplanar)
		vpe_dbg(ctx->dev, " bpl_uv %d\n",
			q_data->bytesperline[VPE_CHROMA]);

	return 0;
}
1519 | ||
1520 | static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |
1521 | { | |
1522 | int ret; | |
1523 | struct vpe_ctx *ctx = file2ctx(file); | |
1524 | ||
1525 | ret = vpe_try_fmt(file, priv, f); | |
1526 | if (ret) | |
1527 | return ret; | |
1528 | ||
1529 | ret = __vpe_s_fmt(ctx, f); | |
1530 | if (ret) | |
1531 | return ret; | |
1532 | ||
1533 | if (V4L2_TYPE_IS_OUTPUT(f->type)) | |
1534 | set_src_registers(ctx); | |
1535 | else | |
1536 | set_dst_registers(ctx); | |
1537 | ||
1538 | return set_srcdst_params(ctx); | |
1539 | } | |
1540 | ||
/* VIDIOC_REQBUFS: forwarded to the v4l2 mem2mem helper. */
static int vpe_reqbufs(struct file *file, void *priv,
		       struct v4l2_requestbuffers *reqbufs)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
1548 | ||
/* VIDIOC_QUERYBUF: forwarded to the v4l2 mem2mem helper. */
static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
1555 | ||
/* VIDIOC_QBUF: forwarded to the v4l2 mem2mem helper. */
static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
1562 | ||
/* VIDIOC_DQBUF: forwarded to the v4l2 mem2mem helper. */
static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
1569 | ||
/* VIDIOC_STREAMON: forwarded to the v4l2 mem2mem helper. */
static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
1576 | ||
/*
 * VIDIOC_STREAMOFF: dump VPE and VPDMA register state for diagnostics,
 * then forward to the v4l2 mem2mem helper.
 */
static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
{
	struct vpe_ctx *ctx = file2ctx(file);

	vpe_dump_regs(ctx->dev);
	vpdma_dump_regs(ctx->dev->vpdma);

	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
1586 | ||
1587 | /* | |
1588 | * defines number of buffers/frames a context can process with VPE before | |
1589 | * switching to a different context. default value is 1 buffer per context | |
1590 | */ | |
1591 | #define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0) | |
1592 | ||
1593 | static int vpe_s_ctrl(struct v4l2_ctrl *ctrl) | |
1594 | { | |
1595 | struct vpe_ctx *ctx = | |
1596 | container_of(ctrl->handler, struct vpe_ctx, hdl); | |
1597 | ||
1598 | switch (ctrl->id) { | |
1599 | case V4L2_CID_VPE_BUFS_PER_JOB: | |
1600 | ctx->bufs_per_job = ctrl->val; | |
1601 | break; | |
1602 | ||
1603 | default: | |
1604 | vpe_err(ctx->dev, "Invalid control\n"); | |
1605 | return -EINVAL; | |
1606 | } | |
1607 | ||
1608 | return 0; | |
1609 | } | |
1610 | ||
/* Control framework callbacks for the driver-private controls. */
static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
	.s_ctrl = vpe_s_ctrl,
};
1614 | ||
/* ioctl dispatch table: multiplanar capture + output (mem2mem device). */
static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
	.vidioc_querycap	= vpe_querycap,

	.vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane	= vpe_s_fmt,

	.vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
	.vidioc_g_fmt_vid_out_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_out_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_out_mplane	= vpe_s_fmt,

	.vidioc_reqbufs		= vpe_reqbufs,
	.vidioc_querybuf	= vpe_querybuf,

	.vidioc_qbuf		= vpe_qbuf,
	.vidioc_dqbuf		= vpe_dqbuf,

	.vidioc_streamon	= vpe_streamon,
	.vidioc_streamoff	= vpe_streamoff,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
1639 | ||
1640 | /* | |
1641 | * Queue operations | |
1642 | */ | |
/*
 * vb2 queue_setup: report the number of planes and per-plane sizes for
 * this queue's currently configured format, and hand out the device's
 * dma-contig allocation context for every plane.
 */
static int vpe_queue_setup(struct vb2_queue *vq,
			   const struct v4l2_format *fmt,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], void *alloc_ctxs[])
{
	int i;
	struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
	struct vpe_q_data *q_data;

	q_data = get_q_data(ctx, vq->type);

	*nplanes = q_data->fmt->coplanar ? 2 : 1;

	for (i = 0; i < *nplanes; i++) {
		sizes[i] = q_data->sizeimage[i];
		alloc_ctxs[i] = ctx->dev->alloc_ctx;
	}

	vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
		sizes[VPE_LUMA]);
	if (q_data->fmt->coplanar)
		vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);

	return 0;
}
1668 | ||
1669 | static int vpe_buf_prepare(struct vb2_buffer *vb) | |
1670 | { | |
1671 | struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); | |
1672 | struct vpe_q_data *q_data; | |
1673 | int i, num_planes; | |
1674 | ||
1675 | vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type); | |
1676 | ||
1677 | q_data = get_q_data(ctx, vb->vb2_queue->type); | |
1678 | num_planes = q_data->fmt->coplanar ? 2 : 1; | |
1679 | ||
1680 | for (i = 0; i < num_planes; i++) { | |
1681 | if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) { | |
1682 | vpe_err(ctx->dev, | |
1683 | "data will not fit into plane (%lu < %lu)\n", | |
1684 | vb2_plane_size(vb, i), | |
1685 | (long) q_data->sizeimage[i]); | |
1686 | return -EINVAL; | |
1687 | } | |
1688 | } | |
1689 | ||
1690 | for (i = 0; i < num_planes; i++) | |
1691 | vb2_set_plane_payload(vb, i, q_data->sizeimage[i]); | |
1692 | ||
1693 | return 0; | |
1694 | } | |
1695 | ||
/* vb2 buf_queue: hand the buffer to the mem2mem framework's ready list. */
static void vpe_buf_queue(struct vb2_buffer *vb)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
1701 | ||
/* vb2 wait_prepare: drop the device lock before sleeping on a buffer. */
static void vpe_wait_prepare(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);
	vpe_unlock(ctx);
}
1707 | ||
/* vb2 wait_finish: re-acquire the device lock after waking up. */
static void vpe_wait_finish(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);
	vpe_lock(ctx);
}
1713 | ||
/* vb2 queue operations shared by the source and destination queues. */
static struct vb2_ops vpe_qops = {
	.queue_setup	 = vpe_queue_setup,
	.buf_prepare	 = vpe_buf_prepare,
	.buf_queue	 = vpe_buf_queue,
	.wait_prepare	 = vpe_wait_prepare,
	.wait_finish	 = vpe_wait_finish,
};
1721 | ||
1722 | static int queue_init(void *priv, struct vb2_queue *src_vq, | |
1723 | struct vb2_queue *dst_vq) | |
1724 | { | |
1725 | struct vpe_ctx *ctx = priv; | |
1726 | int ret; | |
1727 | ||
1728 | memset(src_vq, 0, sizeof(*src_vq)); | |
1729 | src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
1730 | src_vq->io_modes = VB2_MMAP; | |
1731 | src_vq->drv_priv = ctx; | |
1732 | src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); | |
1733 | src_vq->ops = &vpe_qops; | |
1734 | src_vq->mem_ops = &vb2_dma_contig_memops; | |
1735 | src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; | |
1736 | ||
1737 | ret = vb2_queue_init(src_vq); | |
1738 | if (ret) | |
1739 | return ret; | |
1740 | ||
1741 | memset(dst_vq, 0, sizeof(*dst_vq)); | |
1742 | dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
1743 | dst_vq->io_modes = VB2_MMAP; | |
1744 | dst_vq->drv_priv = ctx; | |
1745 | dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); | |
1746 | dst_vq->ops = &vpe_qops; | |
1747 | dst_vq->mem_ops = &vb2_dma_contig_memops; | |
1748 | dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; | |
1749 | ||
1750 | return vb2_queue_init(dst_vq); | |
1751 | } | |
1752 | ||
/* Custom control: how many buffers one context processes per scheduled job. */
static const struct v4l2_ctrl_config vpe_bufs_per_job = {
	.ops = &vpe_ctrl_ops,
	.id = V4L2_CID_VPE_BUFS_PER_JOB,
	.name = "Buffers Per Transaction",
	.type = V4L2_CTRL_TYPE_INTEGER,
	.def = VPE_DEF_BUFS_PER_JOB,
	.min = 1,
	.max = VIDEO_MAX_FRAME,
	.step = 1,
};
1763 | ||
1764 | /* | |
1765 | * File operations | |
1766 | */ | |
/*
 * Open a VPE instance: allocate a per-file context, its VPDMA descriptor
 * list and shadow-register/coefficient buffers, register the control
 * handler, initialize default source/destination formats and create the
 * mem2mem context. All failures unwind through the goto chain below.
 */
static int vpe_open(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = NULL;
	struct vpe_q_data *s_q_data;
	struct v4l2_ctrl_handler *hdl;
	int ret;

	vpe_dbg(dev, "vpe_open\n");

	/* nothing works without the VPDMA firmware */
	if (!dev->vpdma->ready) {
		vpe_err(dev, "vpdma firmware not loaded\n");
		return -ENODEV;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;

	if (mutex_lock_interruptible(&dev->dev_mutex)) {
		ret = -ERESTARTSYS;
		goto free_ctx;
	}

	/* descriptor list used to submit jobs to VPDMA */
	ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
			VPDMA_LIST_TYPE_NORMAL);
	if (ret != 0)
		goto unlock;

	/* shadow copy of the VPE MMR registers, DMA'd to hardware per job */
	ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
	if (ret != 0)
		goto free_desc_list;

	/* horizontal and vertical scaler coefficient buffers */
	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
	if (ret != 0)
		goto free_mmr_adb;

	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
	if (ret != 0)
		goto free_sc_h;

	init_adb_hdrs(ctx);

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;

	hdl = &ctx->hdl;
	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
	if (hdl->error) {
		ret = hdl->error;
		goto exit_fh;
	}
	ctx->fh.ctrl_handler = hdl;
	v4l2_ctrl_handler_setup(hdl);

	/* default source format: 1080p, vpe_formats[2], full-frame crop */
	s_q_data = &ctx->q_data[Q_DATA_SRC];
	s_q_data->fmt = &vpe_formats[2];
	s_q_data->width = 1920;
	s_q_data->height = 1080;
	/* depth is in bits per pixel, hence the >> 3 to get bytes */
	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
	s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M;
	s_q_data->field = V4L2_FIELD_NONE;
	s_q_data->c_rect.left = 0;
	s_q_data->c_rect.top = 0;
	s_q_data->c_rect.width = s_q_data->width;
	s_q_data->c_rect.height = s_q_data->height;
	s_q_data->flags = 0;

	/* destination starts out identical to the source */
	ctx->q_data[Q_DATA_DST] = *s_q_data;

	set_dei_shadow_registers(ctx);
	set_src_registers(ctx);
	set_dst_registers(ctx);
	ret = set_srcdst_params(ctx);
	if (ret)
		goto exit_fh;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);

	if (IS_ERR(ctx->m2m_ctx)) {
		ret = PTR_ERR(ctx->m2m_ctx);
		goto exit_fh;
	}

	v4l2_fh_add(&ctx->fh);

	/*
	 * for now, just report the creation of the first instance, we can later
	 * optimize the driver to enable or disable clocks when the first
	 * instance is created or the last instance released
	 */
	if (atomic_inc_return(&dev->num_instances) == 1)
		vpe_dbg(dev, "first instance created\n");

	ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;

	/* force a full MMR shadow load on the first job */
	ctx->load_mmrs = true;

	vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
		ctx, ctx->m2m_ctx);

	mutex_unlock(&dev->dev_mutex);

	return 0;
exit_fh:
	v4l2_ctrl_handler_free(hdl);
	v4l2_fh_exit(&ctx->fh);
	vpdma_free_desc_buf(&ctx->sc_coeff_v);
free_sc_h:
	vpdma_free_desc_buf(&ctx->sc_coeff_h);
free_mmr_adb:
	vpdma_free_desc_buf(&ctx->mmr_adb);
free_desc_list:
	vpdma_free_desc_list(&ctx->desc_list);
unlock:
	mutex_unlock(&dev->dev_mutex);
free_ctx:
	kfree(ctx);
	return ret;
}
1891 | ||
1892 | static int vpe_release(struct file *file) | |
1893 | { | |
1894 | struct vpe_dev *dev = video_drvdata(file); | |
1895 | struct vpe_ctx *ctx = file2ctx(file); | |
1896 | ||
1897 | vpe_dbg(dev, "releasing instance %p\n", ctx); | |
1898 | ||
1899 | mutex_lock(&dev->dev_mutex); | |
585e6f01 AT |
1900 | free_vbs(ctx); |
1901 | free_mv_buffers(ctx); | |
45719127 AT |
1902 | vpdma_free_desc_list(&ctx->desc_list); |
1903 | vpdma_free_desc_buf(&ctx->mmr_adb); | |
1904 | ||
1905 | v4l2_fh_del(&ctx->fh); | |
1906 | v4l2_fh_exit(&ctx->fh); | |
1907 | v4l2_ctrl_handler_free(&ctx->hdl); | |
1908 | v4l2_m2m_ctx_release(ctx->m2m_ctx); | |
1909 | ||
1910 | kfree(ctx); | |
1911 | ||
1912 | /* | |
1913 | * for now, just report the release of the last instance, we can later | |
1914 | * optimize the driver to enable or disable clocks when the first | |
1915 | * instance is created or the last instance released | |
1916 | */ | |
1917 | if (atomic_dec_return(&dev->num_instances) == 0) | |
1918 | vpe_dbg(dev, "last instance released\n"); | |
1919 | ||
1920 | mutex_unlock(&dev->dev_mutex); | |
1921 | ||
1922 | return 0; | |
1923 | } | |
1924 | ||
1925 | static unsigned int vpe_poll(struct file *file, | |
1926 | struct poll_table_struct *wait) | |
1927 | { | |
1928 | struct vpe_ctx *ctx = file2ctx(file); | |
1929 | struct vpe_dev *dev = ctx->dev; | |
1930 | int ret; | |
1931 | ||
1932 | mutex_lock(&dev->dev_mutex); | |
1933 | ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait); | |
1934 | mutex_unlock(&dev->dev_mutex); | |
1935 | return ret; | |
1936 | } | |
1937 | ||
1938 | static int vpe_mmap(struct file *file, struct vm_area_struct *vma) | |
1939 | { | |
1940 | struct vpe_ctx *ctx = file2ctx(file); | |
1941 | struct vpe_dev *dev = ctx->dev; | |
1942 | int ret; | |
1943 | ||
1944 | if (mutex_lock_interruptible(&dev->dev_mutex)) | |
1945 | return -ERESTARTSYS; | |
1946 | ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); | |
1947 | mutex_unlock(&dev->dev_mutex); | |
1948 | return ret; | |
1949 | } | |
1950 | ||
/* V4L2 file operations for the VPE video device node */
static const struct v4l2_file_operations vpe_fops = {
	.owner		= THIS_MODULE,
	.open		= vpe_open,
	.release	= vpe_release,
	.poll		= vpe_poll,
	.unlocked_ioctl	= video_ioctl2,	/* dispatches via vpe_ioctl_ops */
	.mmap		= vpe_mmap,
};
1959 | ||
/* Template video_device; copied into dev->vfd and customized in vpe_probe() */
static struct video_device vpe_videodev = {
	.name		= VPE_MODULE_NAME,
	.fops		= &vpe_fops,
	.ioctl_ops	= &vpe_ioctl_ops,
	.minor		= -1,		/* let the core pick a minor number */
	.release	= video_device_release,
	.vfl_dir	= VFL_DIR_M2M,	/* mem2mem (transform) device */
};
1968 | ||
/* mem2mem framework callbacks for job scheduling and device serialization */
static struct v4l2_m2m_ops m2m_ops = {
	.device_run	= device_run,
	.job_ready	= job_ready,
	.job_abort	= job_abort,
	.lock		= vpe_lock,
	.unlock		= vpe_unlock,
};
1976 | ||
1977 | static int vpe_runtime_get(struct platform_device *pdev) | |
1978 | { | |
1979 | int r; | |
1980 | ||
1981 | dev_dbg(&pdev->dev, "vpe_runtime_get\n"); | |
1982 | ||
1983 | r = pm_runtime_get_sync(&pdev->dev); | |
1984 | WARN_ON(r < 0); | |
1985 | return r < 0 ? r : 0; | |
1986 | } | |
1987 | ||
1988 | static void vpe_runtime_put(struct platform_device *pdev) | |
1989 | { | |
1990 | ||
1991 | int r; | |
1992 | ||
1993 | dev_dbg(&pdev->dev, "vpe_runtime_put\n"); | |
1994 | ||
1995 | r = pm_runtime_put_sync(&pdev->dev); | |
1996 | WARN_ON(r < 0 && r != -ENOSYS); | |
1997 | } | |
1998 | ||
1999 | static int vpe_probe(struct platform_device *pdev) | |
2000 | { | |
2001 | struct vpe_dev *dev; | |
2002 | struct video_device *vfd; | |
45719127 AT |
2003 | int ret, irq, func; |
2004 | ||
2005 | dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); | |
b68231a1 WY |
2006 | if (!dev) |
2007 | return -ENOMEM; | |
45719127 AT |
2008 | |
2009 | spin_lock_init(&dev->lock); | |
2010 | ||
2011 | ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); | |
2012 | if (ret) | |
2013 | return ret; | |
2014 | ||
2015 | atomic_set(&dev->num_instances, 0); | |
2016 | mutex_init(&dev->dev_mutex); | |
2017 | ||
44687b2e AT |
2018 | dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
2019 | "vpe_top"); | |
45719127 AT |
2020 | /* |
2021 | * HACK: we get resource info from device tree in the form of a list of | |
2022 | * VPE sub blocks, the driver currently uses only the base of vpe_top | |
2023 | * for register access, the driver should be changed later to access | |
2024 | * registers based on the sub block base addresses | |
2025 | */ | |
44687b2e | 2026 | dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K); |
b68231a1 WY |
2027 | if (!dev->base) { |
2028 | ret = -ENOMEM; | |
45719127 AT |
2029 | goto v4l2_dev_unreg; |
2030 | } | |
2031 | ||
2032 | irq = platform_get_irq(pdev, 0); | |
2033 | ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME, | |
2034 | dev); | |
2035 | if (ret) | |
2036 | goto v4l2_dev_unreg; | |
2037 | ||
2038 | platform_set_drvdata(pdev, dev); | |
2039 | ||
2040 | dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); | |
2041 | if (IS_ERR(dev->alloc_ctx)) { | |
2042 | vpe_err(dev, "Failed to alloc vb2 context\n"); | |
2043 | ret = PTR_ERR(dev->alloc_ctx); | |
2044 | goto v4l2_dev_unreg; | |
2045 | } | |
2046 | ||
2047 | dev->m2m_dev = v4l2_m2m_init(&m2m_ops); | |
2048 | if (IS_ERR(dev->m2m_dev)) { | |
2049 | vpe_err(dev, "Failed to init mem2mem device\n"); | |
2050 | ret = PTR_ERR(dev->m2m_dev); | |
2051 | goto rel_ctx; | |
2052 | } | |
2053 | ||
2054 | pm_runtime_enable(&pdev->dev); | |
2055 | ||
2056 | ret = vpe_runtime_get(pdev); | |
2057 | if (ret) | |
2058 | goto rel_m2m; | |
2059 | ||
2060 | /* Perform clk enable followed by reset */ | |
2061 | vpe_set_clock_enable(dev, 1); | |
2062 | ||
2063 | vpe_top_reset(dev); | |
2064 | ||
2065 | func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK, | |
2066 | VPE_PID_FUNC_SHIFT); | |
2067 | vpe_dbg(dev, "VPE PID function %x\n", func); | |
2068 | ||
2069 | vpe_top_vpdma_reset(dev); | |
2070 | ||
44687b2e AT |
2071 | dev->sc = sc_create(pdev); |
2072 | if (IS_ERR(dev->sc)) { | |
2073 | ret = PTR_ERR(dev->sc); | |
2074 | goto runtime_put; | |
2075 | } | |
2076 | ||
45719127 | 2077 | dev->vpdma = vpdma_create(pdev); |
6676cafe WY |
2078 | if (IS_ERR(dev->vpdma)) { |
2079 | ret = PTR_ERR(dev->vpdma); | |
45719127 | 2080 | goto runtime_put; |
6676cafe | 2081 | } |
45719127 AT |
2082 | |
2083 | vfd = &dev->vfd; | |
2084 | *vfd = vpe_videodev; | |
2085 | vfd->lock = &dev->dev_mutex; | |
2086 | vfd->v4l2_dev = &dev->v4l2_dev; | |
2087 | ||
2088 | ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); | |
2089 | if (ret) { | |
2090 | vpe_err(dev, "Failed to register video device\n"); | |
2091 | goto runtime_put; | |
2092 | } | |
2093 | ||
2094 | video_set_drvdata(vfd, dev); | |
2095 | snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name); | |
2096 | dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n", | |
2097 | vfd->num); | |
2098 | ||
2099 | return 0; | |
2100 | ||
2101 | runtime_put: | |
2102 | vpe_runtime_put(pdev); | |
2103 | rel_m2m: | |
2104 | pm_runtime_disable(&pdev->dev); | |
2105 | v4l2_m2m_release(dev->m2m_dev); | |
2106 | rel_ctx: | |
2107 | vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); | |
2108 | v4l2_dev_unreg: | |
2109 | v4l2_device_unregister(&dev->v4l2_dev); | |
2110 | ||
2111 | return ret; | |
2112 | } | |
2113 | ||
2114 | static int vpe_remove(struct platform_device *pdev) | |
2115 | { | |
2116 | struct vpe_dev *dev = | |
2117 | (struct vpe_dev *) platform_get_drvdata(pdev); | |
2118 | ||
2119 | v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME); | |
2120 | ||
2121 | v4l2_m2m_release(dev->m2m_dev); | |
2122 | video_unregister_device(&dev->vfd); | |
2123 | v4l2_device_unregister(&dev->v4l2_dev); | |
2124 | vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); | |
2125 | ||
2126 | vpe_set_clock_enable(dev, 0); | |
2127 | vpe_runtime_put(pdev); | |
2128 | pm_runtime_disable(&pdev->dev); | |
2129 | ||
2130 | return 0; | |
2131 | } | |
2132 | ||
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
	{
		.compatible = "ti,vpe",
	},
	{},
};
/*
 * Fix: without MODULE_DEVICE_TABLE the module alias for the "ti,vpe"
 * compatible is never emitted, so udev/modprobe cannot autoload this
 * driver when the device tree node is present.
 */
MODULE_DEVICE_TABLE(of, vpe_of_match);
#else
#define vpe_of_match NULL
#endif
2143 | ||
/* Platform driver glue; matched against DT via vpe_of_match */
static struct platform_driver vpe_pdrv = {
	.probe		= vpe_probe,
	.remove		= vpe_remove,
	.driver		= {
		.name	= VPE_MODULE_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= vpe_of_match,
	},
};
2153 | ||
/* Standard module init/exit boilerplate generated from the driver struct */
module_platform_driver(vpe_pdrv);

MODULE_DESCRIPTION("TI VPE driver");
MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
MODULE_LICENSE("GPL");