/*
 * Qualcomm MSM Camera Subsystem - VFE Module
 *
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015-2017 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
18 #include <linux/clk.h>
19 #include <linux/completion.h>
20 #include <linux/interrupt.h>
21 #include <linux/iommu.h>
22 #include <linux/iopoll.h>
23 #include <linux/mutex.h>
25 #include <linux/platform_device.h>
26 #include <linux/spinlock_types.h>
27 #include <linux/spinlock.h>
28 #include <media/media-entity.h>
29 #include <media/v4l2-device.h>
30 #include <media/v4l2-subdev.h>
32 #include "camss-vfe.h"
35 #define MSM_VFE_NAME "msm_vfe"
37 #define vfe_line_array(ptr_line) \
38 ((const struct vfe_line (*)[]) &(ptr_line[-(ptr_line->id)]))
40 #define to_vfe(ptr_line) \
41 container_of(vfe_line_array(ptr_line), struct vfe_device, ptr_line)
43 #define VFE_0_HW_VERSION 0x000
45 #define VFE_0_GLOBAL_RESET_CMD 0x00c
46 #define VFE_0_GLOBAL_RESET_CMD_CORE (1 << 0)
47 #define VFE_0_GLOBAL_RESET_CMD_CAMIF (1 << 1)
48 #define VFE_0_GLOBAL_RESET_CMD_BUS (1 << 2)
49 #define VFE_0_GLOBAL_RESET_CMD_BUS_BDG (1 << 3)
50 #define VFE_0_GLOBAL_RESET_CMD_REGISTER (1 << 4)
51 #define VFE_0_GLOBAL_RESET_CMD_TIMER (1 << 5)
52 #define VFE_0_GLOBAL_RESET_CMD_PM (1 << 6)
53 #define VFE_0_GLOBAL_RESET_CMD_BUS_MISR (1 << 7)
54 #define VFE_0_GLOBAL_RESET_CMD_TESTGEN (1 << 8)
56 #define VFE_0_MODULE_CFG 0x018
57 #define VFE_0_MODULE_CFG_DEMUX (1 << 2)
58 #define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE (1 << 3)
59 #define VFE_0_MODULE_CFG_SCALE_ENC (1 << 23)
60 #define VFE_0_MODULE_CFG_CROP_ENC (1 << 27)
62 #define VFE_0_CORE_CFG 0x01c
63 #define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
64 #define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
65 #define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
66 #define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
68 #define VFE_0_IRQ_CMD 0x024
69 #define VFE_0_IRQ_CMD_GLOBAL_CLEAR (1 << 0)
71 #define VFE_0_IRQ_MASK_0 0x028
72 #define VFE_0_IRQ_MASK_0_CAMIF_SOF (1 << 0)
73 #define VFE_0_IRQ_MASK_0_CAMIF_EOF (1 << 1)
74 #define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5))
75 #define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
76 ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
77 #define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8))
78 #define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25))
79 #define VFE_0_IRQ_MASK_0_RESET_ACK (1 << 31)
80 #define VFE_0_IRQ_MASK_1 0x02c
81 #define VFE_0_IRQ_MASK_1_CAMIF_ERROR (1 << 0)
82 #define VFE_0_IRQ_MASK_1_VIOLATION (1 << 7)
83 #define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK (1 << 8)
84 #define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) (1 << ((n) + 9))
85 #define VFE_0_IRQ_MASK_1_RDIn_SOF(n) (1 << ((n) + 29))
87 #define VFE_0_IRQ_CLEAR_0 0x030
88 #define VFE_0_IRQ_CLEAR_1 0x034
90 #define VFE_0_IRQ_STATUS_0 0x038
91 #define VFE_0_IRQ_STATUS_0_CAMIF_SOF (1 << 0)
92 #define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5))
93 #define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
94 ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
95 #define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8))
96 #define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25))
97 #define VFE_0_IRQ_STATUS_0_RESET_ACK (1 << 31)
98 #define VFE_0_IRQ_STATUS_1 0x03c
99 #define VFE_0_IRQ_STATUS_1_VIOLATION (1 << 7)
100 #define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK (1 << 8)
101 #define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) (1 << ((n) + 29))
103 #define VFE_0_IRQ_COMPOSITE_MASK_0 0x40
104 #define VFE_0_VIOLATION_STATUS 0x48
106 #define VFE_0_BUS_CMD 0x4c
107 #define VFE_0_BUS_CMD_Mx_RLD_CMD(x) (1 << (x))
109 #define VFE_0_BUS_CFG 0x050
111 #define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2))
112 #define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN (1 << 1)
113 #define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
114 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
115 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0
116 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5
117 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6
118 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7
120 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n))
121 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
122 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1
123 #define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n))
124 #define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n))
125 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n))
126 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
127 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1F << 2)
129 #define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n))
130 #define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
131 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n))
132 #define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n))
133 #define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
135 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
137 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
139 #define VFE_0_BUS_PING_PONG_STATUS 0x268
141 #define VFE_0_BUS_BDG_CMD 0x2c0
142 #define VFE_0_BUS_BDG_CMD_HALT_REQ 1
144 #define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4
145 #define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
146 #define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8
147 #define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc
148 #define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0
149 #define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4
150 #define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8
151 #define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc
152 #define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0
153 #define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5
155 #define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x)))
156 #define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
157 #define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
158 #define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
159 #define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
160 #define VFE_0_RDI_CFG_x_RDI_EN_BIT (1 << 2)
161 #define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
162 #define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) (1 << (16 + (r)))
164 #define VFE_0_CAMIF_CMD 0x2f4
165 #define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
166 #define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
167 #define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS (1 << 2)
168 #define VFE_0_CAMIF_CFG 0x2f8
169 #define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN (1 << 6)
170 #define VFE_0_CAMIF_FRAME_CFG 0x300
171 #define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304
172 #define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308
173 #define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c
174 #define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314
175 #define VFE_0_CAMIF_STATUS 0x31c
176 #define VFE_0_CAMIF_STATUS_HALT (1 << 31)
178 #define VFE_0_REG_UPDATE 0x378
179 #define VFE_0_REG_UPDATE_RDIn(n) (1 << (1 + (n)))
180 #define VFE_0_REG_UPDATE_line_n(n) \
181 ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
183 #define VFE_0_DEMUX_CFG 0x424
184 #define VFE_0_DEMUX_CFG_PERIOD 0x3
185 #define VFE_0_DEMUX_GAIN_0 0x428
186 #define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
187 #define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
188 #define VFE_0_DEMUX_GAIN_1 0x42c
189 #define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
190 #define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
191 #define VFE_0_DEMUX_EVEN_CFG 0x438
192 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
193 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
194 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
195 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
196 #define VFE_0_DEMUX_ODD_CFG 0x43c
197 #define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
198 #define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
199 #define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
200 #define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
202 #define VFE_0_SCALE_ENC_Y_CFG 0x75c
203 #define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760
204 #define VFE_0_SCALE_ENC_Y_H_PHASE 0x764
205 #define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c
206 #define VFE_0_SCALE_ENC_Y_V_PHASE 0x770
207 #define VFE_0_SCALE_ENC_CBCR_CFG 0x778
208 #define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c
209 #define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780
210 #define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790
211 #define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794
213 #define VFE_0_CROP_ENC_Y_WIDTH 0x854
214 #define VFE_0_CROP_ENC_Y_HEIGHT 0x858
215 #define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c
216 #define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860
218 #define VFE_0_CLAMP_ENC_MAX_CFG 0x874
219 #define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
220 #define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
221 #define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
222 #define VFE_0_CLAMP_ENC_MIN_CFG 0x878
223 #define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
224 #define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
225 #define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
227 #define VFE_0_CGC_OVERRIDE_1 0x974
228 #define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) (1 << (x))
230 /* VFE reset timeout */
231 #define VFE_RESET_TIMEOUT_MS 50
232 /* VFE halt timeout */
233 #define VFE_HALT_TIMEOUT_MS 100
234 /* Max number of frame drop updates per frame */
235 #define VFE_FRAME_DROP_UPDATES 5
236 /* Frame drop value. NOTE: VAL + UPDATES should not exceed 31 */
237 #define VFE_FRAME_DROP_VAL 20
239 #define VFE_NEXT_SOF_MS 500
241 #define CAMIF_TIMEOUT_SLEEP_US 1000
242 #define CAMIF_TIMEOUT_ALL_US 1000000
244 #define SCALER_RATIO_MAX 16
246 static const u32 vfe_formats[] = {
247 MEDIA_BUS_FMT_UYVY8_2X8,
248 MEDIA_BUS_FMT_VYUY8_2X8,
249 MEDIA_BUS_FMT_YUYV8_2X8,
250 MEDIA_BUS_FMT_YVYU8_2X8,
251 MEDIA_BUS_FMT_SBGGR8_1X8,
252 MEDIA_BUS_FMT_SGBRG8_1X8,
253 MEDIA_BUS_FMT_SGRBG8_1X8,
254 MEDIA_BUS_FMT_SRGGB8_1X8,
255 MEDIA_BUS_FMT_SBGGR10_1X10,
256 MEDIA_BUS_FMT_SGBRG10_1X10,
257 MEDIA_BUS_FMT_SGRBG10_1X10,
258 MEDIA_BUS_FMT_SRGGB10_1X10,
259 MEDIA_BUS_FMT_SBGGR12_1X12,
260 MEDIA_BUS_FMT_SGBRG12_1X12,
261 MEDIA_BUS_FMT_SGRBG12_1X12,
262 MEDIA_BUS_FMT_SRGGB12_1X12,
265 static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
267 u32 bits = readl_relaxed(vfe->base + reg);
269 writel_relaxed(bits & ~clr_bits, vfe->base + reg);
272 static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
274 u32 bits = readl_relaxed(vfe->base + reg);
276 writel_relaxed(bits | set_bits, vfe->base + reg);
279 static void vfe_global_reset(struct vfe_device *vfe)
281 u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN |
282 VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
283 VFE_0_GLOBAL_RESET_CMD_PM |
284 VFE_0_GLOBAL_RESET_CMD_TIMER |
285 VFE_0_GLOBAL_RESET_CMD_REGISTER |
286 VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
287 VFE_0_GLOBAL_RESET_CMD_BUS |
288 VFE_0_GLOBAL_RESET_CMD_CAMIF |
289 VFE_0_GLOBAL_RESET_CMD_CORE;
291 writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
294 static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
297 vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
298 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
300 vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
301 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
304 static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
307 vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
308 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
310 vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
311 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
314 #define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
316 static int vfe_word_per_line(uint32_t format, uint32_t pixel_per_line)
321 case V4L2_PIX_FMT_NV12:
322 case V4L2_PIX_FMT_NV21:
323 case V4L2_PIX_FMT_NV16:
324 case V4L2_PIX_FMT_NV61:
325 val = CALC_WORD(pixel_per_line, 1, 8);
327 case V4L2_PIX_FMT_YUYV:
328 case V4L2_PIX_FMT_YVYU:
329 case V4L2_PIX_FMT_UYVY:
330 case V4L2_PIX_FMT_VYUY:
331 val = CALC_WORD(pixel_per_line, 2, 8);
338 static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
339 u16 *width, u16 *height, u16 *bytesperline)
341 switch (pix->pixelformat) {
342 case V4L2_PIX_FMT_NV12:
343 case V4L2_PIX_FMT_NV21:
345 *height = pix->height;
346 *bytesperline = pix->plane_fmt[0].bytesperline;
350 case V4L2_PIX_FMT_NV16:
351 case V4L2_PIX_FMT_NV61:
353 *height = pix->height;
354 *bytesperline = pix->plane_fmt[0].bytesperline;
359 static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
360 struct v4l2_pix_format_mplane *pix,
361 u8 plane, u32 enable)
366 u16 width = 0, height = 0, bytesperline = 0, wpl;
368 vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
370 wpl = vfe_word_per_line(pix->pixelformat, width);
373 reg |= ((wpl + 1) / 2 - 1) << 16;
375 writel_relaxed(reg, vfe->base +
376 VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
378 wpl = vfe_word_per_line(pix->pixelformat, bytesperline);
381 reg |= (height - 1) << 4;
384 writel_relaxed(reg, vfe->base +
385 VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
387 writel_relaxed(0, vfe->base +
388 VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
389 writel_relaxed(0, vfe->base +
390 VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
394 static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
398 reg = readl_relaxed(vfe->base +
399 VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
401 reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
403 reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
404 & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
407 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
410 static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
413 writel_relaxed(pattern,
414 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
417 static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, u16 offset,
422 reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
424 writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
427 static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
430 writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
434 static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
437 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
440 static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
443 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
446 static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
450 reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
452 return (reg >> wm) & 0x1;
455 static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
458 writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG);
460 writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
463 static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
468 reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
469 reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
470 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
472 reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
473 reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
474 VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
475 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
480 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
481 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
484 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
485 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
488 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
489 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
496 vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
499 static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
501 writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
503 VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
506 static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
511 reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
512 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg);
514 reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
515 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
520 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
521 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
524 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
525 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
528 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
529 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
536 vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
539 static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
542 struct vfe_line *line = container_of(output, struct vfe_line, output);
543 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
547 for (i = 0; i < output->wm_num; i++) {
549 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
550 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
552 reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
553 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
554 reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
557 if (output->wm_idx[i] % 2 == 1)
562 VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
566 VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
571 static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
573 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
574 VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
576 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
577 cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
580 static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
582 vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
584 writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
588 static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
589 enum vfe_line_id line_id, u8 enable)
591 u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
592 VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
593 u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
594 VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
597 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
598 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
600 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
601 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
605 static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
606 enum vfe_line_id line_id, u8 enable)
608 struct vfe_output *output = &vfe->line[line_id].output;
614 irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
615 irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
616 irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
617 irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
618 irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
619 for (i = 0; i < output->wm_num; i++) {
620 irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
622 comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
626 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
627 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
628 vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
630 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
631 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
632 vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
636 static void vfe_enable_irq_common(struct vfe_device *vfe)
638 u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
639 u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
640 VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
642 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
643 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
646 static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
648 u32 val, even_cfg, odd_cfg;
650 writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
652 val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
653 writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
655 val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
656 writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
658 switch (line->fmt[MSM_VFE_PAD_SINK].code) {
659 case MEDIA_BUS_FMT_YUYV8_2X8:
660 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
661 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
663 case MEDIA_BUS_FMT_YVYU8_2X8:
664 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
665 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
667 case MEDIA_BUS_FMT_UYVY8_2X8:
669 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
670 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
672 case MEDIA_BUS_FMT_VYUY8_2X8:
673 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
674 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
678 writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
679 writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
682 static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
684 if (input / output >= 16)
687 if (input / output >= 8)
690 if (input / output >= 4)
696 static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
698 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
704 writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
706 input = line->fmt[MSM_VFE_PAD_SINK].width;
707 output = line->compose.width;
708 reg = (output << 16) | input;
709 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
711 interp_reso = vfe_calc_interp_reso(input, output);
712 phase_mult = input * (1 << (13 + interp_reso)) / output;
713 reg = (interp_reso << 20) | phase_mult;
714 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
716 input = line->fmt[MSM_VFE_PAD_SINK].height;
717 output = line->compose.height;
718 reg = (output << 16) | input;
719 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
721 interp_reso = vfe_calc_interp_reso(input, output);
722 phase_mult = input * (1 << (13 + interp_reso)) / output;
723 reg = (interp_reso << 20) | phase_mult;
724 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
726 writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
728 input = line->fmt[MSM_VFE_PAD_SINK].width;
729 output = line->compose.width / 2;
730 reg = (output << 16) | input;
731 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
733 interp_reso = vfe_calc_interp_reso(input, output);
734 phase_mult = input * (1 << (13 + interp_reso)) / output;
735 reg = (interp_reso << 20) | phase_mult;
736 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
738 input = line->fmt[MSM_VFE_PAD_SINK].height;
739 output = line->compose.height;
740 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
741 output = line->compose.height / 2;
742 reg = (output << 16) | input;
743 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
745 interp_reso = vfe_calc_interp_reso(input, output);
746 phase_mult = input * (1 << (13 + interp_reso)) / output;
747 reg = (interp_reso << 20) | phase_mult;
748 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
751 static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
753 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
757 first = line->crop.left;
758 last = line->crop.left + line->crop.width - 1;
759 reg = (first << 16) | last;
760 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
762 first = line->crop.top;
763 last = line->crop.top + line->crop.height - 1;
764 reg = (first << 16) | last;
765 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
767 first = line->crop.left / 2;
768 last = line->crop.left / 2 + line->crop.width / 2 - 1;
769 reg = (first << 16) | last;
770 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
772 first = line->crop.top;
773 last = line->crop.top + line->crop.height - 1;
774 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
775 first = line->crop.top / 2;
776 last = line->crop.top / 2 + line->crop.height / 2 - 1;
778 reg = (first << 16) | last;
779 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
782 static void vfe_set_clamp_cfg(struct vfe_device *vfe)
784 u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
785 VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
786 VFE_0_CLAMP_ENC_MAX_CFG_CH2;
788 writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
790 val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
791 VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
792 VFE_0_CLAMP_ENC_MIN_CFG_CH2;
794 writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
798 * vfe_reset - Trigger reset on VFE module and wait to complete
801 * Return 0 on success or a negative error code otherwise
803 static int vfe_reset(struct vfe_device *vfe)
807 reinit_completion(&vfe->reset_complete);
809 vfe_global_reset(vfe);
811 time = wait_for_completion_timeout(&vfe->reset_complete,
812 msecs_to_jiffies(VFE_RESET_TIMEOUT_MS));
814 dev_err(to_device(vfe), "VFE reset timeout\n");
822 * vfe_halt - Trigger halt on VFE module and wait to complete
825 * Return 0 on success or a negative error code otherwise
827 static int vfe_halt(struct vfe_device *vfe)
831 reinit_completion(&vfe->halt_complete);
833 writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
834 vfe->base + VFE_0_BUS_BDG_CMD);
836 time = wait_for_completion_timeout(&vfe->halt_complete,
837 msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
839 dev_err(to_device(vfe), "VFE halt timeout\n");
846 static void vfe_init_outputs(struct vfe_device *vfe)
850 for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
851 struct vfe_output *output = &vfe->line[i].output;
853 output->state = VFE_OUTPUT_OFF;
854 output->buf[0] = NULL;
855 output->buf[1] = NULL;
856 INIT_LIST_HEAD(&output->pending_bufs);
859 if (vfe->line[i].id == VFE_LINE_PIX)
864 static void vfe_reset_output_maps(struct vfe_device *vfe)
868 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
869 vfe->wm_output_map[i] = VFE_LINE_NONE;
872 static void vfe_set_qos(struct vfe_device *vfe)
874 u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
875 u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
877 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
878 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
879 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
880 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
881 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
882 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
883 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
884 writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
887 static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
889 u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm);
892 vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val);
894 vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val);
899 static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
901 u32 val = VFE_0_MODULE_CFG_DEMUX |
902 VFE_0_MODULE_CFG_CHROMA_UPSAMPLE |
903 VFE_0_MODULE_CFG_SCALE_ENC |
904 VFE_0_MODULE_CFG_CROP_ENC;
907 writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG);
909 writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG);
912 static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
916 switch (line->fmt[MSM_VFE_PAD_SINK].code) {
917 case MEDIA_BUS_FMT_YUYV8_2X8:
918 val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
920 case MEDIA_BUS_FMT_YVYU8_2X8:
921 val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
923 case MEDIA_BUS_FMT_UYVY8_2X8:
925 val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
927 case MEDIA_BUS_FMT_VYUY8_2X8:
928 val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
932 writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
934 val = line->fmt[MSM_VFE_PAD_SINK].width * 2;
935 val |= line->fmt[MSM_VFE_PAD_SINK].height << 16;
936 writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
938 val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
939 writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
941 val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
942 writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
945 writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0);
948 writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
950 val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
951 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
953 val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
954 writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
957 static void vfe_set_camif_cmd(struct vfe_device *vfe, u32 cmd)
959 writel_relaxed(VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS,
960 vfe->base + VFE_0_CAMIF_CMD);
962 writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
965 static int vfe_camif_wait_for_stop(struct vfe_device *vfe)
970 ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
972 (val & VFE_0_CAMIF_STATUS_HALT),
973 CAMIF_TIMEOUT_SLEEP_US,
974 CAMIF_TIMEOUT_ALL_US);
976 dev_err(to_device(vfe), "%s: camif stop timeout\n", __func__);
981 static void vfe_output_init_addrs(struct vfe_device *vfe,
982 struct vfe_output *output, u8 sync)
988 output->active_buf = 0;
990 for (i = 0; i < output->wm_num; i++) {
992 ping_addr = output->buf[0]->addr[i];
997 pong_addr = output->buf[1]->addr[i];
999 pong_addr = ping_addr;
1001 vfe_wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
1002 vfe_wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
1004 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1008 static void vfe_output_update_ping_addr(struct vfe_device *vfe,
1009 struct vfe_output *output, u8 sync)
1014 for (i = 0; i < output->wm_num; i++) {
1016 addr = output->buf[0]->addr[i];
1020 vfe_wm_set_ping_addr(vfe, output->wm_idx[i], addr);
1022 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1026 static void vfe_output_update_pong_addr(struct vfe_device *vfe,
1027 struct vfe_output *output, u8 sync)
1032 for (i = 0; i < output->wm_num; i++) {
1034 addr = output->buf[1]->addr[i];
1038 vfe_wm_set_pong_addr(vfe, output->wm_idx[i], addr);
1040 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1045 static int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id)
1050 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) {
1051 if (vfe->wm_output_map[i] == VFE_LINE_NONE) {
1052 vfe->wm_output_map[i] = line_id;
1061 static int vfe_release_wm(struct vfe_device *vfe, u8 wm)
1063 if (wm > ARRAY_SIZE(vfe->wm_output_map))
1066 vfe->wm_output_map[wm] = VFE_LINE_NONE;
1071 static void vfe_output_frame_drop(struct vfe_device *vfe,
1072 struct vfe_output *output,
1078 /* We need to toggle update period to be valid on next frame */
1079 output->drop_update_idx++;
1080 output->drop_update_idx %= VFE_FRAME_DROP_UPDATES;
1081 drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx;
1083 for (i = 0; i < output->wm_num; i++) {
1084 vfe_wm_set_framedrop_period(vfe, output->wm_idx[i],
1086 vfe_wm_set_framedrop_pattern(vfe, output->wm_idx[i],
1089 vfe_reg_update(vfe, container_of(output, struct vfe_line, output)->id);
1092 static struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output)
1094 struct camss_buffer *buffer = NULL;
1096 if (!list_empty(&output->pending_bufs)) {
1097 buffer = list_first_entry(&output->pending_bufs,
1098 struct camss_buffer,
1100 list_del(&buffer->queue);
1107 * vfe_buf_add_pending - Add output buffer to list of pending
1108 * @output: VFE output
1109 * @buffer: Video buffer
1111 static void vfe_buf_add_pending(struct vfe_output *output,
1112 struct camss_buffer *buffer)
1114 INIT_LIST_HEAD(&buffer->queue);
1115 list_add_tail(&buffer->queue, &output->pending_bufs);
1119 * vfe_buf_flush_pending - Flush all pending buffers.
1120 * @output: VFE output
1121 * @state: vb2 buffer state
1123 static void vfe_buf_flush_pending(struct vfe_output *output,
1124 enum vb2_buffer_state state)
1126 struct camss_buffer *buf;
1127 struct camss_buffer *t;
1129 list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
1130 vb2_buffer_done(&buf->vb.vb2_buf, state);
1131 list_del(&buf->queue);
1135 static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
1136 struct vfe_output *output)
1138 switch (output->state) {
1139 case VFE_OUTPUT_CONTINUOUS:
1140 vfe_output_frame_drop(vfe, output, 3);
1142 case VFE_OUTPUT_SINGLE:
1144 dev_err_ratelimited(to_device(vfe),
1145 "Next buf in wrong state! %d\n",
/*
 * vfe_buf_update_wm_on_last - Handle the last available buffer:
 * CONTINUOUS degrades to SINGLE (drop pattern 1), SINGLE transitions to
 * STOPPING (drop pattern 0, i.e. stop capturing); other states are errors.
 */
1151 static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
1152 struct vfe_output *output)
1154 switch (output->state) {
1155 case VFE_OUTPUT_CONTINUOUS:
1156 output->state = VFE_OUTPUT_SINGLE;
1157 vfe_output_frame_drop(vfe, output, 1);
1159 case VFE_OUTPUT_SINGLE:
1160 output->state = VFE_OUTPUT_STOPPING;
1161 vfe_output_frame_drop(vfe, output, 0);
1164 dev_err_ratelimited(to_device(vfe),
1165 "Last buff in wrong state! %d\n",
/*
 * vfe_buf_update_wm_on_new - Route a newly queued buffer depending on the
 * output state: hand it to the hardware ping/pong slot when one is free,
 * otherwise park it on the pending queue.
 */
1171 static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
1172 struct vfe_output *output,
1173 struct camss_buffer *new_buf)
1177 switch (output->state) {
1178 case VFE_OUTPUT_SINGLE:
/* The slot not currently written by hw is the candidate for new_buf */
1179 inactive_idx = !output->active_buf;
1181 if (!output->buf[inactive_idx]) {
1182 output->buf[inactive_idx] = new_buf;
1185 vfe_output_update_pong_addr(vfe, output, 0);
1187 vfe_output_update_ping_addr(vfe, output, 0);
/* Two buffers in flight now: switch to continuous capture */
1189 vfe_output_frame_drop(vfe, output, 3);
1190 output->state = VFE_OUTPUT_CONTINUOUS;
1192 vfe_buf_add_pending(output, new_buf);
1193 dev_err_ratelimited(to_device(vfe),
1194 "Inactive buffer is busy\n");
1198 case VFE_OUTPUT_IDLE:
1199 if (!output->buf[0]) {
1200 output->buf[0] = new_buf;
1202 vfe_output_init_addrs(vfe, output, 1);
/* First buffer: start single-shot capture */
1204 vfe_output_frame_drop(vfe, output, 1);
1205 output->state = VFE_OUTPUT_SINGLE;
1207 vfe_buf_add_pending(output, new_buf);
1208 dev_err_ratelimited(to_device(vfe),
1209 "Output idle with buffer set!\n");
1213 case VFE_OUTPUT_CONTINUOUS:
/* Hardware already has both slots - queue for later */
1215 vfe_buf_add_pending(output, new_buf);
/*
 * vfe_get_output - Reserve the line's output and its write masters.
 * On any reservation failure the already-reserved write masters are
 * released and the output returns to OFF.
 */
1220 static int vfe_get_output(struct vfe_line *line)
1222 struct vfe_device *vfe = to_vfe(line);
1223 struct vfe_output *output;
1224 unsigned long flags;
1228 spin_lock_irqsave(&vfe->output_lock, flags);
1230 output = &line->output;
1231 if (output->state != VFE_OUTPUT_OFF) {
1232 dev_err(to_device(vfe), "Output is running\n");
1235 output->state = VFE_OUTPUT_RESERVED;
1237 output->active_buf = 0;
1239 for (i = 0; i < output->wm_num; i++) {
1240 wm_idx = vfe_reserve_wm(vfe, line->id);
1242 dev_err(to_device(vfe), "Can not reserve wm\n");
1245 output->wm_idx[i] = wm_idx;
1248 output->drop_update_idx = 0;
1250 spin_unlock_irqrestore(&vfe->output_lock, flags);
/* Error path: undo partial reservation (i counts successful wms) */
1255 for (i--; i >= 0; i--)
1256 vfe_release_wm(vfe, output->wm_idx[i]);
1257 output->state = VFE_OUTPUT_OFF;
1259 spin_unlock_irqrestore(&vfe->output_lock, flags);
/*
 * vfe_put_output - Release all write masters of the line's output and
 * mark it OFF. Counterpart of vfe_get_output().
 */
1264 static int vfe_put_output(struct vfe_line *line)
1266 struct vfe_device *vfe = to_vfe(line);
1267 struct vfe_output *output = &line->output;
1268 unsigned long flags;
1271 spin_lock_irqsave(&vfe->output_lock, flags);
1273 for (i = 0; i < output->wm_num; i++)
1274 vfe_release_wm(vfe, output->wm_idx[i]);
1276 output->state = VFE_OUTPUT_OFF;
1278 spin_unlock_irqrestore(&vfe->output_lock, flags);
/*
 * vfe_enable_output - Configure and start the hardware for one line.
 * Picks initial buffers from the pending queue, programs frame drop for
 * the resulting state, then configures the write masters: a single
 * frame-based wm for RDI lines, or multiple line-based wms plus the full
 * PIX pipeline (camif/xbar/demux/scaler/crop/clamp) for VFE_LINE_PIX.
 * NOTE(review): decimated view - error returns and some branches between
 * the embedded line numbers are not visible here.
 */
1282 static int vfe_enable_output(struct vfe_line *line)
1284 struct vfe_device *vfe = to_vfe(line);
1285 struct vfe_output *output = &line->output;
1286 unsigned long flags;
/* UB (buffer memory) size differs between the VFE0 and VFE1 instances */
1292 ub_size = MSM_VFE_VFE0_UB_SIZE_RDI;
1295 ub_size = MSM_VFE_VFE1_UB_SIZE_RDI;
1301 spin_lock_irqsave(&vfe->output_lock, flags);
1303 vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line->id);
1305 if (output->state != VFE_OUTPUT_RESERVED) {
1306 dev_err(to_device(vfe), "Output is not in reserved state %d\n",
1308 spin_unlock_irqrestore(&vfe->output_lock, flags);
1311 output->state = VFE_OUTPUT_IDLE;
/* Seed the ping/pong slots from buffers queued before streamon */
1313 output->buf[0] = vfe_buf_get_pending(output);
1314 output->buf[1] = vfe_buf_get_pending(output);
1316 if (!output->buf[0] && output->buf[1]) {
1317 output->buf[0] = output->buf[1];
1318 output->buf[1] = NULL;
1322 output->state = VFE_OUTPUT_SINGLE;
1325 output->state = VFE_OUTPUT_CONTINUOUS;
1327 switch (output->state) {
1328 case VFE_OUTPUT_SINGLE:
1329 vfe_output_frame_drop(vfe, output, 1);
1331 case VFE_OUTPUT_CONTINUOUS:
1332 vfe_output_frame_drop(vfe, output, 3);
1335 vfe_output_frame_drop(vfe, output, 0);
1339 output->sequence = 0;
1340 output->wait_sof = 0;
1341 output->wait_reg_update = 0;
1342 reinit_completion(&output->sof);
1343 reinit_completion(&output->reg_update);
1345 vfe_output_init_addrs(vfe, output, 0);
/* RDI lines use a single frame-based write master */
1347 if (line->id != VFE_LINE_PIX) {
1348 vfe_set_cgc_override(vfe, output->wm_idx[0], 1);
1349 vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
1350 vfe_bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
1351 vfe_wm_set_subsample(vfe, output->wm_idx[0]);
1352 vfe_set_rdi_cid(vfe, line->id, 0);
1353 vfe_wm_set_ub_cfg(vfe, output->wm_idx[0],
1354 (ub_size + 1) * output->wm_idx[0], ub_size);
1355 vfe_wm_frame_based(vfe, output->wm_idx[0], 1);
1356 vfe_wm_enable(vfe, output->wm_idx[0], 1);
1357 vfe_bus_reload_wm(vfe, output->wm_idx[0]);
/* PIX line: split the UB evenly across its line-based write masters */
1359 ub_size /= output->wm_num;
1360 for (i = 0; i < output->wm_num; i++) {
1361 vfe_set_cgc_override(vfe, output->wm_idx[i], 1);
1362 vfe_wm_set_subsample(vfe, output->wm_idx[i]);
1363 vfe_wm_set_ub_cfg(vfe, output->wm_idx[i],
1364 (ub_size + 1) * output->wm_idx[i],
1366 vfe_wm_line_based(vfe, output->wm_idx[i],
1367 &line->video_out.active_fmt.fmt.pix_mp,
1369 vfe_wm_enable(vfe, output->wm_idx[i], 1);
1370 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1372 vfe_enable_irq_pix_line(vfe, 0, line->id, 1);
1373 vfe_set_module_cfg(vfe, 1);
1374 vfe_set_camif_cfg(vfe, line);
1375 vfe_set_xbar_cfg(vfe, output, 1);
1376 vfe_set_demux_cfg(vfe, line);
1377 vfe_set_scale_cfg(vfe, line);
1378 vfe_set_crop_cfg(vfe, line);
1379 vfe_set_clamp_cfg(vfe);
1380 vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY);
1383 vfe_reg_update(vfe, line->id);
1385 spin_unlock_irqrestore(&vfe->output_lock, flags);
/*
 * vfe_disable_output - Stop the hardware for one line.
 * Waits for the next SOF, disables the write masters, waits for the
 * reg update to take effect, then tears down the RDI or PIX
 * configuration. Timeouts are logged but teardown proceeds regardless.
 */
1390 static int vfe_disable_output(struct vfe_line *line)
1392 struct vfe_device *vfe = to_vfe(line);
1393 struct vfe_output *output = &line->output;
1394 unsigned long flags;
1398 spin_lock_irqsave(&vfe->output_lock, flags);
/* Ask the SOF ISR to complete &output->sof at the next frame start */
1400 output->wait_sof = 1;
1401 spin_unlock_irqrestore(&vfe->output_lock, flags);
1403 time = wait_for_completion_timeout(&output->sof,
1404 msecs_to_jiffies(VFE_NEXT_SOF_MS));
1406 dev_err(to_device(vfe), "VFE sof timeout\n");
1408 spin_lock_irqsave(&vfe->output_lock, flags);
1409 for (i = 0; i < output->wm_num; i++)
1410 vfe_wm_enable(vfe, output->wm_idx[i], 0);
1412 vfe_reg_update(vfe, line->id);
1413 output->wait_reg_update = 1;
1414 spin_unlock_irqrestore(&vfe->output_lock, flags);
1416 time = wait_for_completion_timeout(&output->reg_update,
1417 msecs_to_jiffies(VFE_NEXT_SOF_MS));
1419 dev_err(to_device(vfe), "VFE reg update timeout\n");
1421 spin_lock_irqsave(&vfe->output_lock, flags);
1423 if (line->id != VFE_LINE_PIX) {
1424 vfe_wm_frame_based(vfe, output->wm_idx[0], 0);
1425 vfe_bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id);
1426 vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
1427 vfe_set_cgc_override(vfe, output->wm_idx[0], 0);
1428 spin_unlock_irqrestore(&vfe->output_lock, flags);
1430 for (i = 0; i < output->wm_num; i++) {
1431 vfe_wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
1432 vfe_set_cgc_override(vfe, output->wm_idx[i], 0);
1435 vfe_enable_irq_pix_line(vfe, 0, line->id, 0);
1436 vfe_set_module_cfg(vfe, 0);
1437 vfe_set_xbar_cfg(vfe, output, 0);
1439 vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY);
1440 spin_unlock_irqrestore(&vfe->output_lock, flags);
/* Must not return while CAMIF is still running - wait outside the lock */
1442 vfe_camif_wait_for_stop(vfe);
1449 * vfe_enable - Enable streaming on VFE line
1452 * Return 0 on success or a negative error code otherwise
1454 static int vfe_enable(struct vfe_line *line)
1456 struct vfe_device *vfe = to_vfe(line);
/* stream_lock serializes the shared stream_count / bus write-if state */
1459 mutex_lock(&vfe->stream_lock);
1461 if (!vfe->stream_count) {
1462 vfe_enable_irq_common(vfe);
1464 vfe_bus_enable_wr_if(vfe, 1);
1469 vfe->stream_count++;
1471 mutex_unlock(&vfe->stream_lock);
1473 ret = vfe_get_output(line);
1475 goto error_get_output;
1477 ret = vfe_enable_output(line);
1479 goto error_enable_output;
1481 vfe->was_streaming = 1;
/* Unwind in reverse order of acquisition */
1486 error_enable_output:
1487 vfe_put_output(line);
1490 mutex_lock(&vfe->stream_lock);
1492 if (vfe->stream_count == 1)
1493 vfe_bus_enable_wr_if(vfe, 0);
1495 vfe->stream_count--;
1497 mutex_unlock(&vfe->stream_lock);
1503 * vfe_disable - Disable streaming on VFE line
1506 * Return 0 on success or a negative error code otherwise
1508 static int vfe_disable(struct vfe_line *line)
1510 struct vfe_device *vfe = to_vfe(line);
1512 vfe_disable_output(line);
1514 vfe_put_output(line);
1516 mutex_lock(&vfe->stream_lock);
/* Last active stream: turn off the bus write interface too */
1518 if (vfe->stream_count == 1)
1519 vfe_bus_enable_wr_if(vfe, 0);
1521 vfe->stream_count--;
1523 mutex_unlock(&vfe->stream_lock);
1529 * vfe_isr_sof - Process start of frame interrupt
1531 * @line_id: VFE line
1533 static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
1535 struct vfe_output *output;
1536 unsigned long flags;
1538 spin_lock_irqsave(&vfe->output_lock, flags);
1539 output = &vfe->line[line_id].output;
/* Wake vfe_disable_output() if it is waiting for the next SOF */
1540 if (output->wait_sof) {
1541 output->wait_sof = 0;
1542 complete(&output->sof);
1544 spin_unlock_irqrestore(&vfe->output_lock, flags);
1548 * vfe_isr_reg_update - Process reg update interrupt
1550 * @line_id: VFE line
1552 static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
1554 struct vfe_output *output;
1555 unsigned long flags;
1557 spin_lock_irqsave(&vfe->output_lock, flags);
1558 vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
1560 output = &vfe->line[line_id].output;
/* Someone (vfe_disable_output) is waiting on this reg update: wake it */
1562 if (output->wait_reg_update) {
1563 output->wait_reg_update = 0;
1564 complete(&output->reg_update);
1565 spin_unlock_irqrestore(&vfe->output_lock, flags);
1569 if (output->state == VFE_OUTPUT_STOPPING) {
1570 /* Release last buffer when hw is idle */
1571 if (output->last_buffer) {
1572 vb2_buffer_done(&output->last_buffer->vb.vb2_buf,
1573 VB2_BUF_STATE_DONE);
1574 output->last_buffer = NULL;
1576 output->state = VFE_OUTPUT_IDLE;
1578 /* Buffers received in stopping state are queued in */
1579 /* dma pending queue, start next capture here */
1581 output->buf[0] = vfe_buf_get_pending(output);
1582 output->buf[1] = vfe_buf_get_pending(output);
1584 if (!output->buf[0] && output->buf[1]) {
1585 output->buf[0] = output->buf[1];
1586 output->buf[1] = NULL;
1590 output->state = VFE_OUTPUT_SINGLE;
1593 output->state = VFE_OUTPUT_CONTINUOUS;
/* Program frame drop to match the restarted capture mode */
1595 switch (output->state) {
1596 case VFE_OUTPUT_SINGLE:
1597 vfe_output_frame_drop(vfe, output, 2);
1599 case VFE_OUTPUT_CONTINUOUS:
1600 vfe_output_frame_drop(vfe, output, 3);
1603 vfe_output_frame_drop(vfe, output, 0);
1607 vfe_output_init_addrs(vfe, output, 1);
1610 spin_unlock_irqrestore(&vfe->output_lock, flags);
1614 * vfe_isr_wm_done - Process write master done interrupt
1616 * @wm: Write master id
1618 static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
1620 struct camss_buffer *ready_buf;
1621 struct vfe_output *output;
1622 dma_addr_t *new_addr;
1623 unsigned long flags;
/* Timestamp the frame as early as possible in the ISR */
1625 u64 ts = ktime_get_ns();
1628 active_index = vfe_wm_get_ping_pong_status(vfe, wm);
1630 spin_lock_irqsave(&vfe->output_lock, flags);
1632 if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
1633 dev_err_ratelimited(to_device(vfe),
1634 "Received wm done for unmapped index\n");
1637 output = &vfe->line[vfe->wm_output_map[wm]].output;
1639 if (output->active_buf == active_index) {
1640 dev_err_ratelimited(to_device(vfe),
1641 "Active buffer mismatch!\n");
1644 output->active_buf = active_index;
/* Hardware just finished writing the non-active slot */
1646 ready_buf = output->buf[!active_index];
1648 dev_err_ratelimited(to_device(vfe),
1649 "Missing ready buf %d %d!\n",
1650 !active_index, output->state);
1654 ready_buf->vb.vb2_buf.timestamp = ts;
1655 ready_buf->vb.sequence = output->sequence++;
1657 /* Get next buffer */
1658 output->buf[!active_index] = vfe_buf_get_pending(output);
1659 if (!output->buf[!active_index]) {
1660 /* No next buffer - set same address */
1661 new_addr = ready_buf->addr;
1662 vfe_buf_update_wm_on_last(vfe, output);
1664 new_addr = output->buf[!active_index]->addr;
1665 vfe_buf_update_wm_on_next(vfe, output);
/* Re-arm the just-completed ping or pong slot with the next address */
1669 for (i = 0; i < output->wm_num; i++)
1670 vfe_wm_set_ping_addr(vfe, output->wm_idx[i],
1673 for (i = 0; i < output->wm_num; i++)
1674 vfe_wm_set_pong_addr(vfe, output->wm_idx[i],
1677 spin_unlock_irqrestore(&vfe->output_lock, flags);
/* When stopping, hold the buffer until the hw is idle (see reg_update) */
1679 if (output->state == VFE_OUTPUT_STOPPING)
1680 output->last_buffer = ready_buf;
1682 vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
1687 spin_unlock_irqrestore(&vfe->output_lock, flags);
1691 * vfe_isr_comp_done - Process composite image done interrupt
1693 * @comp: Composite image id
1695 static void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp)
/* Fan out to per-wm done handling for every wm owned by the PIX line */
1699 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
1700 if (vfe->wm_output_map[i] == VFE_LINE_PIX) {
1701 vfe_isr_wm_done(vfe, i);
1707 * vfe_isr - ISPIF module interrupt handler
1708 * @irq: Interrupt line
1711 * Return IRQ_HANDLED on success
1713 static irqreturn_t vfe_isr(int irq, void *dev)
1715 struct vfe_device *vfe = dev;
/* Read, then ack, both IRQ status registers */
1720 value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
1721 value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
1723 writel_relaxed(value0, vfe->base + VFE_0_IRQ_CLEAR_0);
1724 writel_relaxed(value1, vfe->base + VFE_0_IRQ_CLEAR_1);
1727 writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
1729 if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
1730 complete(&vfe->reset_complete);
1732 if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) {
1733 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
1734 dev_err_ratelimited(to_device(vfe),
1735 "VFE: violation = 0x%08x\n", violation);
1738 if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) {
1739 complete(&vfe->halt_complete);
1740 writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
1743 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
1744 if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
1745 vfe_isr_reg_update(vfe, i);
1747 if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
1748 vfe_isr_sof(vfe, VFE_LINE_PIX);
1750 for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
1751 if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
1752 vfe_isr_sof(vfe, i);
1754 for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
1755 if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
1756 vfe_isr_comp_done(vfe, i);
/* PIX wms were handled via the composite IRQ - mask their ping-pong bits
 * so the per-wm loop below does not process them a second time */
1757 for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
1758 if (vfe->wm_output_map[j] == VFE_LINE_PIX)
1759 value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
1762 for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
1763 if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
1764 vfe_isr_wm_done(vfe, i);
1770 * vfe_get - Power up and reset VFE module
1773 * Return 0 on success or a negative error code otherwise
1775 static int vfe_get(struct vfe_device *vfe)
1779 mutex_lock(&vfe->power_lock);
/* First user: enable clocks, reset hw and (re)initialize output state */
1781 if (vfe->power_count == 0) {
1782 ret = camss_enable_clocks(vfe->nclocks, vfe->clock,
1787 ret = vfe_reset(vfe);
1791 vfe_reset_output_maps(vfe);
1793 vfe_init_outputs(vfe);
1797 mutex_unlock(&vfe->power_lock);
/* Error path: undo the clock enable */
1802 camss_disable_clocks(vfe->nclocks, vfe->clock);
1805 mutex_unlock(&vfe->power_lock);
1811 * vfe_put - Power down VFE module
1814 static void vfe_put(struct vfe_device *vfe)
1816 mutex_lock(&vfe->power_lock);
1818 if (vfe->power_count == 0) {
/* Unbalanced put: log and bail rather than underflowing the refcount */
1819 dev_err(to_device(vfe), "vfe power off on power_count == 0\n");
1821 } else if (vfe->power_count == 1) {
1822 if (vfe->was_streaming) {
1823 vfe->was_streaming = 0;
1826 camss_disable_clocks(vfe->nclocks, vfe->clock);
1832 mutex_unlock(&vfe->power_lock);
1836 * vfe_video_pad_to_line - Get pointer to VFE line by media pad
1839 * Return pointer to vfe line structure
1841 static struct vfe_line *vfe_video_pad_to_line(struct media_pad *pad)
1843 struct media_pad *vfe_pad;
1844 struct v4l2_subdev *subdev;
/* Follow the media link from the video node pad back to the VFE subdev */
1846 vfe_pad = media_entity_remote_pad(pad);
1847 if (vfe_pad == NULL)
1850 subdev = media_entity_to_v4l2_subdev(vfe_pad->entity);
1852 return container_of(subdev, struct vfe_line, subdev);
1856 * vfe_queue_buffer - Add empty buffer
1857 * @vid: Video device structure
1858 * @buf: Buffer to be enqueued
1860 * Add an empty buffer - depending on the current number of buffers it will be
1861 * put in pending buffer queue or directly given to the hardware to be filled.
1863 * Return 0 on success or a negative error code otherwise
1865 static int vfe_queue_buffer(struct camss_video *vid,
1866 struct camss_buffer *buf)
1868 struct vfe_device *vfe = &vid->camss->vfe;
1869 struct vfe_line *line;
1870 struct vfe_output *output;
1871 unsigned long flags;
1873 line = vfe_video_pad_to_line(&vid->pad);
1875 dev_err(to_device(vfe), "Can not queue buffer\n");
1878 output = &line->output;
/* Buffer routing must be atomic with respect to the ISR paths */
1880 spin_lock_irqsave(&vfe->output_lock, flags);
1882 vfe_buf_update_wm_on_new(vfe, output, buf);
1884 spin_unlock_irqrestore(&vfe->output_lock, flags);
1890 * vfe_flush_buffers - Return all vb2 buffers
1891 * @vid: Video device structure
1892 * @state: vb2 buffer state of the returned buffers
1894 * Return all buffers to vb2. This includes queued pending buffers (still
1895 * unused) and any buffers given to the hardware but again still not used.
1897 * Return 0 on success or a negative error code otherwise
1899 static int vfe_flush_buffers(struct camss_video *vid,
1900 enum vb2_buffer_state state)
1902 struct vfe_device *vfe = &vid->camss->vfe;
1903 struct vfe_line *line;
1904 struct vfe_output *output;
1905 unsigned long flags;
1907 line = vfe_video_pad_to_line(&vid->pad);
1909 dev_err(to_device(vfe), "Can not flush buffers\n");
1912 output = &line->output;
1914 spin_lock_irqsave(&vfe->output_lock, flags);
1916 vfe_buf_flush_pending(output, state);
/* Also return the ping/pong slots and any held last_buffer */
1919 vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state);
1922 vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state);
1924 if (output->last_buffer) {
1925 vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state);
1926 output->last_buffer = NULL;
1929 spin_unlock_irqrestore(&vfe->output_lock, flags);
1935 * vfe_set_power - Power on/off VFE module
1936 * @sd: VFE V4L2 subdevice
1937 * @on: Requested power state
1939 * Return 0 on success or a negative error code otherwise
1941 static int vfe_set_power(struct v4l2_subdev *sd, int on)
1943 struct vfe_line *line = v4l2_get_subdevdata(sd);
1944 struct vfe_device *vfe = to_vfe(line);
/* Reading HW_VERSION doubles as a sanity check that the block is alive */
1954 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
1955 dev_dbg(to_device(vfe),
1956 "VFE HW Version = 0x%08x\n", hw_version);
1965 * vfe_set_stream - Enable/disable streaming on VFE module
1966 * @sd: VFE V4L2 subdevice
1967 * @enable: Requested streaming state
1969 * Main configuration of VFE module is triggered here.
1971 * Return 0 on success or a negative error code otherwise
1973 static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
1975 struct vfe_line *line = v4l2_get_subdevdata(sd);
1976 struct vfe_device *vfe = to_vfe(line);
/* Dispatch to per-line enable/disable depending on @enable */
1980 ret = vfe_enable(line);
1982 dev_err(to_device(vfe),
1983 "Failed to enable vfe outputs\n");
1985 ret = vfe_disable(line);
1987 dev_err(to_device(vfe),
1988 "Failed to disable vfe outputs\n");
1995 * __vfe_get_format - Get pointer to format structure
1997 * @cfg: V4L2 subdev pad configuration
1998 * @pad: pad from which format is requested
1999 * @which: TRY or ACTIVE format
2001 * Return pointer to TRY or ACTIVE format structure
2003 static struct v4l2_mbus_framefmt *
2004 __vfe_get_format(struct vfe_line *line,
2005 struct v4l2_subdev_pad_config *cfg,
2007 enum v4l2_subdev_format_whence which)
/* TRY formats live in the pad config; ACTIVE ones in the line itself */
2009 if (which == V4L2_SUBDEV_FORMAT_TRY)
2010 return v4l2_subdev_get_try_format(&line->subdev, cfg, pad);
2012 return &line->fmt[pad];
2016 * __vfe_get_compose - Get pointer to compose selection structure
2018 * @cfg: V4L2 subdev pad configuration
2019 * @which: TRY or ACTIVE format
2021 * Return pointer to TRY or ACTIVE compose rectangle structure
2023 static struct v4l2_rect *
2024 __vfe_get_compose(struct vfe_line *line,
2025 struct v4l2_subdev_pad_config *cfg,
2026 enum v4l2_subdev_format_whence which)
/* TRY rectangles live in the pad config; ACTIVE ones in the line */
2028 if (which == V4L2_SUBDEV_FORMAT_TRY)
2029 return v4l2_subdev_get_try_compose(&line->subdev, cfg,
2032 return &line->compose;
2036 * __vfe_get_crop - Get pointer to crop selection structure
2038 * @cfg: V4L2 subdev pad configuration
2039 * @which: TRY or ACTIVE format
2041 * Return pointer to TRY or ACTIVE crop rectangle structure
2043 static struct v4l2_rect *
2044 __vfe_get_crop(struct vfe_line *line,
2045 struct v4l2_subdev_pad_config *cfg,
2046 enum v4l2_subdev_format_whence which)
/* Same TRY/ACTIVE split as __vfe_get_format/__vfe_get_compose */
2048 if (which == V4L2_SUBDEV_FORMAT_TRY)
2049 return v4l2_subdev_get_try_crop(&line->subdev, cfg,
2056 * vfe_try_format - Handle try format by pad subdev method
2058 * @cfg: V4L2 subdev pad configuration
2059 * @pad: pad on which format is requested
2060 * @fmt: pointer to v4l2 format structure
2061 * @which: wanted subdev format
2063 static void vfe_try_format(struct vfe_line *line,
2064 struct v4l2_subdev_pad_config *cfg,
2066 struct v4l2_mbus_framefmt *fmt,
2067 enum v4l2_subdev_format_whence which)
2073 case MSM_VFE_PAD_SINK:
2074 /* Set format on sink pad */
2076 for (i = 0; i < ARRAY_SIZE(vfe_formats); i++)
2077 if (fmt->code == vfe_formats[i])
2080 /* If not found, use UYVY as default */
2081 if (i >= ARRAY_SIZE(vfe_formats))
2082 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
2084 fmt->width = clamp_t(u32, fmt->width, 1, 8191);
2085 fmt->height = clamp_t(u32, fmt->height, 1, 8191);
2087 fmt->field = V4L2_FIELD_NONE;
2088 fmt->colorspace = V4L2_COLORSPACE_SRGB;
2092 case MSM_VFE_PAD_SRC:
2093 /* Set and return a format same as sink pad */
2097 *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
/* PIX line: source size follows the crop rectangle, and 2X8 YUV codes
 * may be downgraded to their 1_5X8 variants when requested */
2100 if (line->id == VFE_LINE_PIX) {
2101 struct v4l2_rect *rect;
2103 rect = __vfe_get_crop(line, cfg, which);
2105 fmt->width = rect->width;
2106 fmt->height = rect->height;
2108 switch (fmt->code) {
2109 case MEDIA_BUS_FMT_YUYV8_2X8:
2110 if (code == MEDIA_BUS_FMT_YUYV8_1_5X8)
2111 fmt->code = MEDIA_BUS_FMT_YUYV8_1_5X8;
2113 fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
2115 case MEDIA_BUS_FMT_YVYU8_2X8:
2116 if (code == MEDIA_BUS_FMT_YVYU8_1_5X8)
2117 fmt->code = MEDIA_BUS_FMT_YVYU8_1_5X8;
2119 fmt->code = MEDIA_BUS_FMT_YVYU8_2X8;
2121 case MEDIA_BUS_FMT_UYVY8_2X8:
2123 if (code == MEDIA_BUS_FMT_UYVY8_1_5X8)
2124 fmt->code = MEDIA_BUS_FMT_UYVY8_1_5X8;
2126 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
2128 case MEDIA_BUS_FMT_VYUY8_2X8:
2129 if (code == MEDIA_BUS_FMT_VYUY8_1_5X8)
2130 fmt->code = MEDIA_BUS_FMT_VYUY8_1_5X8;
2132 fmt->code = MEDIA_BUS_FMT_VYUY8_2X8;
2140 fmt->colorspace = V4L2_COLORSPACE_SRGB;
2144 * vfe_try_compose - Handle try compose selection by pad subdev method
2146 * @cfg: V4L2 subdev pad configuration
2147 * @rect: pointer to v4l2 rect structure
2148 * @which: wanted subdev format
2150 static void vfe_try_compose(struct vfe_line *line,
2151 struct v4l2_subdev_pad_config *cfg,
2152 struct v4l2_rect *rect,
2153 enum v4l2_subdev_format_whence which)
2155 struct v4l2_mbus_framefmt *fmt;
2157 fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
/* Compose rectangle cannot exceed the sink format */
2159 if (rect->width > fmt->width)
2160 rect->width = fmt->width;
2162 if (rect->height > fmt->height)
2163 rect->height = fmt->height;
/* Scaler supports at most SCALER_RATIO_MAX downscaling */
2165 if (fmt->width > rect->width * SCALER_RATIO_MAX)
2166 rect->width = (fmt->width + SCALER_RATIO_MAX - 1) /
/* Width must be even */
2169 rect->width &= ~0x1;
2171 if (fmt->height > rect->height * SCALER_RATIO_MAX)
2172 rect->height = (fmt->height + SCALER_RATIO_MAX - 1) /
2175 if (rect->width < 16)
2178 if (rect->height < 4)
2183 * vfe_try_crop - Handle try crop selection by pad subdev method
2185 * @cfg: V4L2 subdev pad configuration
2186 * @rect: pointer to v4l2 rect structure
2187 * @which: wanted subdev format
2189 static void vfe_try_crop(struct vfe_line *line,
2190 struct v4l2_subdev_pad_config *cfg,
2191 struct v4l2_rect *rect,
2192 enum v4l2_subdev_format_whence which)
2194 struct v4l2_rect *compose;
2196 compose = __vfe_get_compose(line, cfg, which);
/* Crop rectangle must fit inside the compose rectangle */
2198 if (rect->width > compose->width)
2199 rect->width = compose->width;
2201 if (rect->width + rect->left > compose->width)
2202 rect->left = compose->width - rect->width;
2204 if (rect->height > compose->height)
2205 rect->height = compose->height;
2207 if (rect->height + rect->top > compose->height)
2208 rect->top = compose->height - rect->height;
2210 /* wm in line based mode writes multiple of 16 horizontally */
/* Re-center before rounding width down to a multiple of 16 */
2211 rect->left += (rect->width & 0xf) >> 1;
2212 rect->width &= ~0xf;
2214 if (rect->width < 16) {
2219 if (rect->height < 4) {
2226 * vfe_enum_mbus_code - Handle pixel format enumeration
2227 * @sd: VFE V4L2 subdevice
2228 * @cfg: V4L2 subdev pad configuration
2229 * @code: pointer to v4l2_subdev_mbus_code_enum structure
2231 * return -EINVAL or zero on success
2233 static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
2234 struct v4l2_subdev_pad_config *cfg,
2235 struct v4l2_subdev_mbus_code_enum *code)
2237 struct vfe_line *line = v4l2_get_subdevdata(sd);
2238 struct v4l2_mbus_framefmt *format;
/* Sink pad enumerates the whole vfe_formats table */
2240 if (code->pad == MSM_VFE_PAD_SINK) {
2241 if (code->index >= ARRAY_SIZE(vfe_formats))
2244 code->code = vfe_formats[code->index];
/* Source pad exposes a single code, derived from the sink format */
2246 if (code->index > 0)
2249 format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
2252 code->code = format->code;
2259 * vfe_enum_frame_size - Handle frame size enumeration
2260 * @sd: VFE V4L2 subdevice
2261 * @cfg: V4L2 subdev pad configuration
2262 * @fse: pointer to v4l2_subdev_frame_size_enum structure
2264 * Return -EINVAL or zero on success
2266 static int vfe_enum_frame_size(struct v4l2_subdev *sd,
2267 struct v4l2_subdev_pad_config *cfg,
2268 struct v4l2_subdev_frame_size_enum *fse)
2270 struct vfe_line *line = v4l2_get_subdevdata(sd);
2271 struct v4l2_mbus_framefmt format;
2273 if (fse->index != 0)
/* Probe minimum size by running the smallest request through try_format */
2276 format.code = fse->code;
2279 vfe_try_format(line, cfg, fse->pad, &format, fse->which);
2280 fse->min_width = format.width;
2281 fse->min_height = format.height;
2283 if (format.code != fse->code)
/* And maximum size likewise */
2286 format.code = fse->code;
2289 vfe_try_format(line, cfg, fse->pad, &format, fse->which);
2290 fse->max_width = format.width;
2291 fse->max_height = format.height;
2297 * vfe_get_format - Handle get format by pads subdev method
2298 * @sd: VFE V4L2 subdevice
2299 * @cfg: V4L2 subdev pad configuration
2300 * @fmt: pointer to v4l2 subdev format structure
2302 * Return -EINVAL or zero on success
2304 static int vfe_get_format(struct v4l2_subdev *sd,
2305 struct v4l2_subdev_pad_config *cfg,
2306 struct v4l2_subdev_format *fmt)
2308 struct vfe_line *line = v4l2_get_subdevdata(sd);
2309 struct v4l2_mbus_framefmt *format;
2311 format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
/* Copy out the stored TRY or ACTIVE format */
2315 fmt->format = *format;
/* Forward declaration: vfe_set_format and vfe_set_selection call each
 * other (format propagation resets the compose/crop chain) */
2320 static int vfe_set_selection(struct v4l2_subdev *sd,
2321 struct v4l2_subdev_pad_config *cfg,
2322 struct v4l2_subdev_selection *sel);
2325 * vfe_set_format - Handle set format by pads subdev method
2326 * @sd: VFE V4L2 subdevice
2327 * @cfg: V4L2 subdev pad configuration
2328 * @fmt: pointer to v4l2 subdev format structure
2330 * Return -EINVAL or zero on success
2332 static int vfe_set_format(struct v4l2_subdev *sd,
2333 struct v4l2_subdev_pad_config *cfg,
2334 struct v4l2_subdev_format *fmt)
2336 struct vfe_line *line = v4l2_get_subdevdata(sd);
2337 struct v4l2_mbus_framefmt *format;
2339 format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
2343 vfe_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
2344 *format = fmt->format;
2346 if (fmt->pad == MSM_VFE_PAD_SINK) {
2347 struct v4l2_subdev_selection sel = { 0 };
2350 /* Propagate the format from sink to source */
2351 format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SRC,
2354 *format = fmt->format;
2355 vfe_try_format(line, cfg, MSM_VFE_PAD_SRC, format,
/* Compose/crop chain exists only on the PIX line */
2358 if (line->id != VFE_LINE_PIX)
2361 /* Reset sink pad compose selection */
2362 sel.which = fmt->which;
2363 sel.pad = MSM_VFE_PAD_SINK;
2364 sel.target = V4L2_SEL_TGT_COMPOSE;
2365 sel.r.width = fmt->format.width;
2366 sel.r.height = fmt->format.height;
2367 ret = vfe_set_selection(sd, cfg, &sel);
2376 * vfe_get_selection - Handle get selection by pads subdev method
2377 * @sd: VFE V4L2 subdevice
2378 * @cfg: V4L2 subdev pad configuration
2379 * @sel: pointer to v4l2 subdev selection structure
2381 * Return -EINVAL or zero on success
2383 static int vfe_get_selection(struct v4l2_subdev *sd,
2384 struct v4l2_subdev_pad_config *cfg,
2385 struct v4l2_subdev_selection *sel)
2387 struct vfe_line *line = v4l2_get_subdevdata(sd);
2388 struct v4l2_subdev_format fmt = { 0 };
2389 struct v4l2_rect *rect;
/* Selections only exist on the PIX line (scaler/crop pipeline) */
2392 if (line->id != VFE_LINE_PIX)
2395 if (sel->pad == MSM_VFE_PAD_SINK)
2396 switch (sel->target) {
/* Compose bounds == current sink format size */
2397 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
2399 fmt.which = sel->which;
2400 ret = vfe_get_format(sd, cfg, &fmt);
2406 sel->r.width = fmt.format.width;
2407 sel->r.height = fmt.format.height;
2409 case V4L2_SEL_TGT_COMPOSE:
2410 rect = __vfe_get_compose(line, cfg, sel->which);
2419 else if (sel->pad == MSM_VFE_PAD_SRC)
2420 switch (sel->target) {
/* Crop bounds == current compose rectangle */
2421 case V4L2_SEL_TGT_CROP_BOUNDS:
2422 rect = __vfe_get_compose(line, cfg, sel->which);
2426 sel->r.left = rect->left;
2427 sel->r.top = rect->top;
2428 sel->r.width = rect->width;
2429 sel->r.height = rect->height;
2431 case V4L2_SEL_TGT_CROP:
2432 rect = __vfe_get_crop(line, cfg, sel->which);
2446 * vfe_set_selection - Handle set selection by pads subdev method
2447 * @sd: VFE V4L2 subdevice
2448 * @cfg: V4L2 subdev pad configuration
2449 * @sel: pointer to v4l2 subdev selection structure
2451 * Return -EINVAL or zero on success
2453 int vfe_set_selection(struct v4l2_subdev *sd,
2454 struct v4l2_subdev_pad_config *cfg,
2455 struct v4l2_subdev_selection *sel)
2457 struct vfe_line *line = v4l2_get_subdevdata(sd);
2458 struct v4l2_rect *rect;
2461 if (line->id != VFE_LINE_PIX)
/* Setting sink compose cascades: compose -> source crop -> source format */
2464 if (sel->target == V4L2_SEL_TGT_COMPOSE &&
2465 sel->pad == MSM_VFE_PAD_SINK) {
2466 struct v4l2_subdev_selection crop = { 0 };
2468 rect = __vfe_get_compose(line, cfg, sel->which);
2472 vfe_try_compose(line, cfg, &sel->r, sel->which);
2475 /* Reset source crop selection */
2476 crop.which = sel->which;
2477 crop.pad = MSM_VFE_PAD_SRC;
2478 crop.target = V4L2_SEL_TGT_CROP;
2480 ret = vfe_set_selection(sd, cfg, &crop);
2481 } else if (sel->target == V4L2_SEL_TGT_CROP &&
2482 sel->pad == MSM_VFE_PAD_SRC) {
2483 struct v4l2_subdev_format fmt = { 0 };
2485 rect = __vfe_get_crop(line, cfg, sel->which);
2489 vfe_try_crop(line, cfg, &sel->r, sel->which);
2492 /* Reset source pad format width and height */
2493 fmt.which = sel->which;
2494 fmt.pad = MSM_VFE_PAD_SRC;
2495 ret = vfe_get_format(sd, cfg, &fmt);
2499 fmt.format.width = rect->width;
2500 fmt.format.height = rect->height;
2501 ret = vfe_set_format(sd, cfg, &fmt);
2510 * vfe_init_formats - Initialize formats on all pads
2511 * @sd: VFE V4L2 subdevice
2512 * @fh: V4L2 subdev file handle
2514 * Initialize all pad formats with default values.
2516 * Return 0 on success or a negative error code otherwise
2518 static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
2520 struct v4l2_subdev_format format = {
2521 .pad = MSM_VFE_PAD_SINK,
/* With a file handle we initialize TRY formats, otherwise ACTIVE */
2522 .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
2523 V4L2_SUBDEV_FORMAT_ACTIVE,
2525 .code = MEDIA_BUS_FMT_UYVY8_2X8,
/* Setting the sink propagates defaults to the source pad too */
2531 return vfe_set_format(sd, fh ? fh->pad : NULL, &format);
2535 * msm_vfe_subdev_init - Initialize VFE device structure and resources
2537 * @res: VFE module resources table
2539 * Return 0 on success or a negative error code otherwise
2541 int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res)
2543 struct device *dev = to_device(vfe);
2544 struct platform_device *pdev = to_platform_device(dev);
2546 struct camss *camss = to_camss(vfe);
/* Map register block */
2552 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
2553 vfe->base = devm_ioremap_resource(dev, r);
2554 if (IS_ERR(vfe->base)) {
2555 dev_err(dev, "could not map memory\n");
2556 return PTR_ERR(vfe->base);
/* Request the VFE interrupt */
2561 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
2564 dev_err(dev, "missing IRQ\n");
2568 vfe->irq = r->start;
2569 snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d",
2570 dev_name(dev), MSM_VFE_NAME, vfe->id);
2571 ret = devm_request_irq(dev, vfe->irq, vfe_isr,
2572 IRQF_TRIGGER_RISING, vfe->irq_name, vfe);
2574 dev_err(dev, "request_irq failed: %d\n", ret);
/* Count and acquire clocks from the NULL-terminated resource table */
2581 while (res->clock[vfe->nclocks])
2584 vfe->clock = devm_kzalloc(dev, vfe->nclocks * sizeof(*vfe->clock),
2589 for (i = 0; i < vfe->nclocks; i++) {
2590 vfe->clock[i] = devm_clk_get(dev, res->clock[i]);
2591 if (IS_ERR(vfe->clock[i]))
2592 return PTR_ERR(vfe->clock[i]);
/* Apply requested clock rates where specified (0 = leave default) */
2594 if (res->clock_rate[i]) {
2595 long clk_rate = clk_round_rate(vfe->clock[i],
2596 res->clock_rate[i]);
2598 dev_err(dev, "clk round rate failed\n");
2601 ret = clk_set_rate(vfe->clock[i], clk_rate);
2603 dev_err(dev, "clk set rate failed\n");
/* Initialize locking and per-line state */
2609 mutex_init(&vfe->power_lock);
2610 vfe->power_count = 0;
2612 mutex_init(&vfe->stream_lock);
2613 vfe->stream_count = 0;
2615 spin_lock_init(&vfe->output_lock);
2618 vfe->reg_update = 0;
2620 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
2621 vfe->line[i].video_out.type =
2622 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
2623 vfe->line[i].video_out.camss = camss;
2624 vfe->line[i].id = i;
2625 init_completion(&vfe->line[i].output.sof);
2626 init_completion(&vfe->line[i].output.reg_update);
2629 init_completion(&vfe->reset_complete);
2630 init_completion(&vfe->halt_complete);
2636  * msm_vfe_get_vfe_id - Get VFE HW module id
2637  * @entity: Pointer to VFE media entity structure
2638  * @id: Return VFE HW module id here
2640 void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id)
2642 struct v4l2_subdev *sd;
2643 struct vfe_line *line;
2644 struct vfe_device *vfe;
/* Walk entity -> subdev -> per-line drvdata; the owning vfe_device
 * supplies the HW id that is written to *id.
 */
2646 sd = media_entity_to_v4l2_subdev(entity);
2647 line = v4l2_get_subdevdata(sd);
2654  * msm_vfe_get_vfe_line_id - Get VFE line id by media entity
2655  * @entity: Pointer to VFE media entity structure
2656  * @id: Return VFE line id here
2658 void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id)
2660 struct v4l2_subdev *sd;
2661 struct vfe_line *line;
/* The line id lives in the vfe_line stored as the subdev's drvdata. */
2663 sd = media_entity_to_v4l2_subdev(entity);
2664 line = v4l2_get_subdevdata(sd);
2670  * vfe_link_setup - Setup VFE connections
2671  * @entity: Pointer to media entity structure
2672  * @local: Pointer to local pad
2673  * @remote: Pointer to remote pad
2674  * @flags: Link flags
2676  * Return 0 on success
2678 static int vfe_link_setup(struct media_entity *entity,
2679 const struct media_pad *local,
2680 const struct media_pad *remote, u32 flags)
/* Only a single enabled link per pad is accepted: enabling a new link
 * while the local pad already has an active remote link is refused.
 */
2682 if (flags & MEDIA_LNK_FL_ENABLED)
2683 if (media_entity_remote_pad(local))
/* V4L2 subdev core ops: only power control is implemented. */
2689 static const struct v4l2_subdev_core_ops vfe_core_ops = {
2690 .s_power = vfe_set_power,
/* V4L2 subdev video ops: stream on/off. */
2693 static const struct v4l2_subdev_video_ops vfe_video_ops = {
2694 .s_stream = vfe_set_stream,
/* V4L2 subdev pad ops: format enumeration/negotiation and crop selection. */
2697 static const struct v4l2_subdev_pad_ops vfe_pad_ops = {
2698 .enum_mbus_code = vfe_enum_mbus_code,
2699 .enum_frame_size = vfe_enum_frame_size,
2700 .get_fmt = vfe_get_format,
2701 .set_fmt = vfe_set_format,
2702 .get_selection = vfe_get_selection,
2703 .set_selection = vfe_set_selection,
/* Aggregate subdev ops table passed to v4l2_subdev_init(). */
2706 static const struct v4l2_subdev_ops vfe_v4l2_ops = {
2707 .core = &vfe_core_ops,
2708 .video = &vfe_video_ops,
2709 .pad = &vfe_pad_ops,
/* Internal ops: set default TRY formats when a subdev node is opened. */
2712 static const struct v4l2_subdev_internal_ops vfe_v4l2_internal_ops = {
2713 .open = vfe_init_formats,
/* Media entity ops: restrict link setup; validate links via the V4L2 helper. */
2716 static const struct media_entity_operations vfe_media_ops = {
2717 .link_setup = vfe_link_setup,
2718 .link_validate = v4l2_subdev_link_validate,
/* Buffer callbacks used by the camss video output nodes. */
2721 static const struct camss_video_ops camss_vfe_video_ops = {
2722 .queue_buffer = vfe_queue_buffer,
2723 .flush_buffers = vfe_flush_buffers,
/*
 * msm_vfe_stop_streaming - Stop active streaming on all VFE video nodes
 * @vfe: VFE device
 */
2726 void msm_vfe_stop_streaming(struct vfe_device *vfe)
/* Stop the video output node of every VFE line. */
2730 for (i = 0; i < ARRAY_SIZE(vfe->line); i++)
2731 msm_video_stop_streaming(&vfe->line[i].video_out);
2735  * msm_vfe_register_entities - Register subdev node for VFE module
2736  * @vfe: VFE device
2737  * @v4l2_dev: V4L2 device
2739  * Initialize and register a subdev node for the VFE module. Then
2740  * call msm_video_register() to register the video device node which
2741  * will be connected to this subdev node. Then actually create the
2742  * media link between them.
2744  * Return 0 on success or a negative error code otherwise
2746 int msm_vfe_register_entities(struct vfe_device *vfe,
2747 struct v4l2_device *v4l2_dev)
2749 struct device *dev = to_device(vfe);
2750 struct v4l2_subdev *sd;
2751 struct media_pad *pads;
2752 struct camss_video *video_out;
/* One subdev + one video device node is created per VFE line. */
2756 for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
2759 sd = &vfe->line[i].subdev;
2760 pads = vfe->line[i].pads;
2761 video_out = &vfe->line[i].video_out;
2763 v4l2_subdev_init(sd, &vfe_v4l2_ops);
2764 sd->internal_ops = &vfe_v4l2_internal_ops;
2765 sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
/* The PIX line is named "msm_vfe<id>_pix", RDI lines "msm_vfe<id>_rdi<n>". */
2766 if (i == VFE_LINE_PIX)
2767 snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s",
2768 MSM_VFE_NAME, vfe->id, "pix");
2770 snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s%d",
2771 MSM_VFE_NAME, vfe->id, "rdi", i);
2773 v4l2_set_subdevdata(sd, &vfe->line[i]);
/* Program default ACTIVE formats before exposing the node. */
2775 ret = vfe_init_formats(sd, NULL);
2777 dev_err(dev, "Failed to init format: %d\n", ret);
/* Each line entity has one sink and one source pad. */
2781 pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
2782 pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
2784 sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
2785 sd->entity.ops = &vfe_media_ops;
2786 ret = media_entity_pads_init(&sd->entity, MSM_VFE_PADS_NUM,
2789 dev_err(dev, "Failed to init media entity: %d\n", ret);
2793 ret = v4l2_device_register_subdev(v4l2_dev, sd);
2795 dev_err(dev, "Failed to register subdev: %d\n", ret);
2796 goto error_reg_subdev;
/* Video node: the PIX line is line based with 16-byte bytesperline
 * alignment; RDI lines use 8-byte alignment and are not line based.
 */
2799 video_out->ops = &camss_vfe_video_ops;
2800 video_out->bpl_alignment = 8;
2801 video_out->line_based = 0;
2802 if (i == VFE_LINE_PIX) {
2803 video_out->bpl_alignment = 16;
2804 video_out->line_based = 1;
2806 snprintf(name, ARRAY_SIZE(name), "%s%d_%s%d",
2807 MSM_VFE_NAME, vfe->id, "video", i);
2808 ret = msm_video_register(video_out, v4l2_dev, name,
2809 i == VFE_LINE_PIX ? 1 : 0);
2811 dev_err(dev, "Failed to register video node: %d\n",
2813 goto error_reg_video;
/* Immutable, always-enabled link from the line's source pad to its
 * video device node.
 */
2816 ret = media_create_pad_link(
2817 &sd->entity, MSM_VFE_PAD_SRC,
2818 &video_out->vdev.entity, 0,
2819 MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
2821 dev_err(dev, "Failed to link %s->%s entities: %d\n",
2822 sd->entity.name, video_out->vdev.entity.name,
/* Error unwind: first undo the partially set-up current line... */
2831 msm_video_unregister(video_out);
2834 v4l2_device_unregister_subdev(sd);
2837 media_entity_cleanup(&sd->entity);
/* ...then tear down all fully registered earlier lines in reverse. */
2840 for (i--; i >= 0; i--) {
2841 sd = &vfe->line[i].subdev;
2842 video_out = &vfe->line[i].video_out;
2844 msm_video_unregister(video_out);
2845 v4l2_device_unregister_subdev(sd);
2846 media_entity_cleanup(&sd->entity);
2853  * msm_vfe_unregister_entities - Unregister VFE module subdev node
2854  * @vfe: VFE device
2856 void msm_vfe_unregister_entities(struct vfe_device *vfe)
/* Locks are no longer needed once the entities go away. */
2860 mutex_destroy(&vfe->power_lock);
2861 mutex_destroy(&vfe->stream_lock);
/* Tear down each line's video node, subdev and media entity. */
2863 for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
2864 struct v4l2_subdev *sd = &vfe->line[i].subdev;
2865 struct camss_video *video_out = &vfe->line[i].video_out;
2867 msm_video_unregister(video_out);
2868 v4l2_device_unregister_subdev(sd);
2869 media_entity_cleanup(&sd->entity);