// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Intel Corporation
 *
 * Based partially on Intel IPU4 driver written by
 *  Sakari Ailus <sakari.ailus@linux.intel.com>
 *  Samu Onkalo <samu.onkalo@intel.com>
 *  Jouni Högander <jouni.hogander@intel.com>
 *  Jouni Ukkonen <jouni.ukkonen@intel.com>
 *  Antti Laakso <antti.laakso@intel.com>
 * et al.
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>

#include "ipu3-cio2.h"

struct ipu3_cio2_fmt {
	u32 mbus_code;
	u32 fourcc;
	u8 mipicode;
};

/*
 * These are raw formats used in Intel's third generation of
 * Image Processing Unit known as IPU3.
 * 10bit raw bayer packed, 32 bytes for every 25 pixels,
 * last LSB 6 bits unused.
 */
static const struct ipu3_cio2_fmt formats[] = {
	{	/* put default entry at beginning */
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
		.mipicode	= 0x2b,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
		.mipicode	= 0x2b,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
		.mipicode	= 0x2b,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
		.mipicode	= 0x2b,
	},
};

/*
 * cio2_find_format - lookup color format by fourcc or/and media bus code
 * @pixelformat: fourcc to match, ignored if null
 * @mbus_code: media bus code to match, ignored if null
 */
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
						    const u32 *mbus_code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (pixelformat && *pixelformat != formats[i].fourcc)
			continue;
		if (mbus_code && *mbus_code != formats[i].mbus_code)
			continue;

		return &formats[i];
	}

	return NULL;
}

static inline u32 cio2_bytesperline(const unsigned int width)
{
	/*
	 * 64 bytes for every 50 pixels, the line length
	 * in bytes is a multiple of 64 (line end alignment).
	 */
	return DIV_ROUND_UP(width, 50) * 64;
}

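/*
 * Worked example of the stride calculation above, with an illustrative
 * width: a 2592 pixel wide line needs DIV_ROUND_UP(2592, 50) = 52
 * blocks of 64 bytes, i.e. a 3328 byte, 64-byte-aligned line.
 */
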
/**************** FBPT operations ****************/

static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
	if (cio2->dummy_lop) {
		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
				  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
		cio2->dummy_lop = NULL;
	}
	if (cio2->dummy_page) {
		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
				  cio2->dummy_page, cio2->dummy_page_bus_addr);
		cio2->dummy_page = NULL;
	}
}

static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
	unsigned int i;

	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
					      CIO2_PAGE_SIZE,
					      &cio2->dummy_page_bus_addr,
					      GFP_KERNEL);
	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
					     CIO2_PAGE_SIZE,
					     &cio2->dummy_lop_bus_addr,
					     GFP_KERNEL);
	if (!cio2->dummy_page || !cio2->dummy_lop) {
		cio2_fbpt_exit_dummy(cio2);
		return -ENOMEM;
	}
	/*
	 * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
	 * Initialize each entry to dummy_page bus base address.
	 */
	for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
		cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;

	return 0;
}

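/*
 * Sizing illustration for the LOP above: with the 4 KiB page and
 * 32-bit entries described in the comment, one LOP page holds
 * CIO2_PAGE_SIZE / sizeof(u32) = 1024 pointers and therefore maps
 * 1024 * 4 KiB = 4 MiB of buffer memory.
 */
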
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
	/*
	 * The CPU first initializes some fields in fbpt, then sets
	 * the VALID bit, this barrier is to ensure that the DMA(device)
	 * does not see the VALID bit enabled before other fields are
	 * initialized; otherwise it could lead to havoc.
	 */
	dma_wmb();

	/*
	 * Request interrupts for start and completion
	 * Valid bit is applicable only to 1st entry
	 */
	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}

/* Initialize fbpt entries to point to dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
				       struct cio2_fbpt_entry
				       entry[CIO2_MAX_LOPS])
{
	unsigned int i;

	entry[0].first_entry.first_page_offset = 0;
	entry[1].second_entry.num_of_pages =
		CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
	entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;

	for (i = 0; i < CIO2_MAX_LOPS; i++)
		entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

	cio2_fbpt_entry_enable(cio2, entry);
}

/* Initialize fbpt entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
				     struct cio2_buffer *b,
				     struct cio2_fbpt_entry
				     entry[CIO2_MAX_LOPS])
{
	struct vb2_buffer *vb = &b->vbb.vb2_buf;
	unsigned int length = vb->planes[0].length;
	int remaining, i;

	entry[0].first_entry.first_page_offset = b->offset;
	remaining = length + entry[0].first_entry.first_page_offset;
	entry[1].second_entry.num_of_pages =
		DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
	/*
	 * last_page_available_bytes has the offset of the last byte in the
	 * last page which is still accessible by DMA. DMA cannot access
	 * beyond this point. Valid range for this is from 0 to 4095.
	 * 0 indicates 1st byte in the page is DMA accessible.
	 * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
	 * is available for DMA transfer.
	 */
	entry[1].second_entry.last_page_available_bytes =
			(remaining & ~PAGE_MASK) ?
				(remaining & ~PAGE_MASK) - 1 :
				CIO2_PAGE_SIZE - 1;
	/* Fill FBPT */
	remaining = length;
	i = 0;
	while (remaining > 0) {
		entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
		remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
		entry++;
		i++;
	}

	/*
	 * The first not meaningful FBPT entry should point to a valid LOP
	 */
	entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

	cio2_fbpt_entry_enable(cio2, entry);
}

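/*
 * Example of the tail-page arithmetic above, with illustrative numbers
 * and 4 KiB pages: a 3000000 byte plane at offset 0 gives
 * remaining = 3000000, so num_of_pages = DIV_ROUND_UP(3000000, 4096)
 * = 733, and remaining & ~PAGE_MASK = 1728, hence
 * last_page_available_bytes = 1727, i.e. the DMA may touch bytes
 * 0..1727 of the final page only.
 */
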
static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	struct device *dev = &cio2->pci_dev->dev;

	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
				     GFP_KERNEL);
	if (!q->fbpt)
		return -ENOMEM;

	return 0;
}

static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{
	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
}

/**************** CSI2 hardware setup ****************/

/*
 * The CSI2 receiver has several parameters affecting
 * the receiver timings. These depend on the MIPI bus frequency
 * F in Hz (sensor transmitter rate) as follows:
 *     register value = (A/1e9 + B * UI) / COUNT_ACC
 * where
 *      UI = 1 / (2 * F) in seconds
 *      COUNT_ACC = counter accuracy in seconds
 *      For IPU3 COUNT_ACC = 0.0625
 *
 * A and B are coefficients from the table below,
 * depending on whether the register minimum or maximum value is
 * calculated.
 *
 *                                     Minimum     Maximum
 * Clock lane                          A     B     A     B
 * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
 * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
 * Data lanes
 * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
 *
 * We use the minimum values of both A and B.
 */

/*
 * shift for keeping value range suitable for 32-bit integer arithmetic
 */
#define LIMIT_SHIFT	8

static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
	const u32 accinv = 16; /* inverse of the counter resolution */
	const u32 uiinv = 500000000; /* 1e9 / 2 */
	s32 r;

	freq >>= LIMIT_SHIFT;

	if (WARN_ON(freq <= 0 || freq > S32_MAX))
		return def;
	/*
	 * b could be 0, -2 or -8, so |accinv * b| is always
	 * less than (1 << ds) and thus |r| < 500000000.
	 */
	r = accinv * b * (uiinv >> LIMIT_SHIFT);
	r = r / (s32)freq;
	/* max value of a is 95 */
	r += accinv * a;

	return r;
}

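/*
 * Worked example of the timing formula, with an illustrative bus
 * frequency: for F = 400 MHz, UI = 1.25 ns, so the clock lane settle
 * minimum (A = 95, B = -8 from the table above) is
 * (95e-9 + (-8 * 1.25e-9)) / 0.0625e-9 = 1360. In the fixed-point code
 * this is accinv * (-8) * (uiinv >> LIMIT_SHIFT) / (400000000 >> LIMIT_SHIFT)
 * + accinv * 95 = -160 + 1520 = 1360.
 */
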
/* Calculate the delay value for termination enable of clock lane HS Rx */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
				 struct cio2_csi2_timing *timing)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ, };
	struct v4l2_ctrl *link_freq;
	s64 freq;
	int r;

	if (!q->sensor)
		return -ENODEV;

	link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
	if (!link_freq) {
		dev_err(dev, "failed to find LINK_FREQ\n");
		return -EPIPE;
	}

	qm.index = v4l2_ctrl_g_ctrl(link_freq);
	r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
	if (r) {
		dev_err(dev, "failed to get menu item\n");
		return r;
	}

	if (!qm.value) {
		dev_err(dev, "error invalid link_freq\n");
		return -EINVAL;
	}
	freq = qm.value;

	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);

	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);

	return 0;
}

static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const int NUM_VCS = 4;
	static const int SID;	/* Stream id */
	static const int ENTRY;
	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
					CIO2_FBPT_SUBENTRY_UNIT);
	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
	const struct ipu3_cio2_fmt *fmt;
	void __iomem *const base = cio2->base;
	u8 lanes, csi2bus = q->csi2.port;
	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
	struct cio2_csi2_timing timing;
	int i, r;

	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
	if (!fmt)
		return -EINVAL;

	lanes = q->csi2.lanes;

	r = cio2_csi2_calc_timing(cio2, q, &timing);
	if (r)
		return r;

	writel(timing.clk_termen, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
	writel(timing.clk_settle, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));

	for (i = 0; i < lanes; i++) {
		writel(timing.dat_termen, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
		writel(timing.dat_settle, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
	}

	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
	       CIO2_PBM_WMCTRL1_MID1_2CK |
	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
	       CIO2_PBM_ARB_CTRL_LE_EN |
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
	       base + CIO2_REG_PBM_ARB_CTRL);
	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);

	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);

	/* Configure MIPI backend */
	for (i = 0; i < NUM_VCS; i++)
		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));

	/* There are 16 short packet LUT entries */
	for (i = 0; i < 16; i++)
		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);

	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);

	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
	       base + CIO2_REG_INT_EN);

	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));

	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
	writel(CIO2_CGC_PRIM_TGE |
	       CIO2_CGC_SIDE_TGE |
	       CIO2_CGC_XOSC_TGE |
	       CIO2_CGC_D3I3_TGE |
	       CIO2_CGC_CSI2_INTERFRAME_TGE |
	       CIO2_CGC_CSI2_PORT_DCGE |
	       CIO2_CGC_SIDE_DCGE |
	       CIO2_CGC_PRIM_DCGE |
	       CIO2_CGC_ROSC_DCGE |
	       CIO2_CGC_XOSC_DCGE |
	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL01);
	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL23);

	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
		writel(0, base + CIO2_REG_CDMABA(i));
		writel(0, base + CIO2_REG_CDMAC0(i));
		writel(0, base + CIO2_REG_CDMAC1(i));
	}

	/* Enable DMA */
	writel(q->fbpt_bus_addr >> PAGE_SHIFT,
	       base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));

	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
	       CIO2_CDMAC0_DMA_INTR_ON_FE |
	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
	       CIO2_CDMAC0_DMA_EN |
	       CIO2_CDMAC0_DMA_INTR_ON_FS |
	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));

	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));

	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);

	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));

	/* Clear interrupts */
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
	writel(~0, base + CIO2_REG_INT_STS);

	/* Enable devices, starting from the last device in the pipe */
	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);

	return 0;
}

static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	void __iomem *base = cio2->base;
	unsigned int i, maxloops = 1000;

	/* Disable CSI receiver and MIPI backend devices */
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);

	/* Halt DMA */
	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
	do {
		if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
		    CIO2_CDMAC0_DMA_HALTED)
			break;
		usleep_range(1000, 2000);
	} while (--maxloops);
	if (!maxloops)
		dev_err(&cio2->pci_dev->dev,
			"DMA %i cannot be halted\n", CIO2_DMA_CHAN);

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
	}
}

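/*
 * Note on the halt loop above: with maxloops = 1000 and a 1-2 ms sleep
 * per iteration, the driver waits at most roughly one to two seconds
 * for CIO2_CDMAC0_DMA_HALTED before giving up and logging an error.
 */
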
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q = cio2->cur_queue;
	int buffers_found = 0;
	u64 ns = ktime_get_ns();

	if (dma_chan >= CIO2_QUEUES) {
		dev_err(dev, "bad DMA channel %i\n", dma_chan);
		return;
	}

	/* Find out which buffer(s) are ready */
	do {
		struct cio2_fbpt_entry *const entry =
			&q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
		struct cio2_buffer *b;

		if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
			break;

		b = q->bufs[q->bufs_first];
		if (b) {
			unsigned int bytes = entry[1].second_entry.num_of_bytes;

			q->bufs[q->bufs_first] = NULL;
			atomic_dec(&q->bufs_queued);
			dev_dbg(&cio2->pci_dev->dev,
				"buffer %i done\n", b->vbb.vb2_buf.index);

			b->vbb.vb2_buf.timestamp = ns;
			b->vbb.field = V4L2_FIELD_NONE;
			b->vbb.sequence = atomic_read(&q->frame_sequence);
			if (b->vbb.vb2_buf.planes[0].length != bytes)
				dev_warn(dev, "buffer length is %d received %d\n",
					 b->vbb.vb2_buf.planes[0].length,
					 bytes);
			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
		}
		atomic_inc(&q->frame_sequence);
		cio2_fbpt_entry_init_dummy(cio2, entry);
		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
		buffers_found++;
	} while (1);

	if (buffers_found == 0)
		dev_warn(&cio2->pci_dev->dev,
			 "no ready buffers found on DMA channel %u\n",
			 dma_chan);
}

static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{
	/*
	 * For the user space camera control algorithms it is essential
	 * to know when the reception of a frame has begun. That's often
	 * the best timing information to get from the hardware.
	 */
	struct v4l2_event event = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
	};

	v4l2_event_queue(q->subdev.devnode, &event);
}

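/*
 * For illustration only (not part of the driver): user space typically
 * consumes these events on the subdev node roughly as sketched below.
 * The device path is hypothetical; the ioctls are standard V4L2 UAPI.
 *
 *	struct v4l2_event_subscription sub = {
 *		.type = V4L2_EVENT_FRAME_SYNC,
 *	};
 *	struct v4l2_event ev;
 *	int fd = open("/dev/v4l-subdev0", O_RDWR);
 *
 *	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
 *	ioctl(fd, VIDIOC_DQEVENT, &ev);	// blocks until start of frame
 *	// then use ev.u.frame_sync.frame_sequence
 */
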
static const char *const cio2_irq_errs[] = {
	"single packet header error corrected",
	"multiple packet header errors detected",
	"payload checksum (CRC) error",
	"fifo overflow",
	"reserved short packet data type detected",
	"reserved long packet data type detected",
	"incomplete long packet detected",
	"frame sync error",
	"line sync error",
	"DPHY start of transmission error",
	"DPHY synchronization error",
	"escape mode error",
	"escape mode trigger event",
	"escape mode ultra-low power state for data lane(s)",
	"escape mode ultra-low power state exit for clock lane",
	"inter-frame short packet discarded",
	"inter-frame long packet discarded",
	"non-matching Long Packet stalled",
};

static const char *const cio2_port_errs[] = {
	"ECC recoverable",
	"DPHY not recoverable",
	"ECC not recoverable",
	"CRC error",
	"INTERFRAMEDATA",
	"PKT2SHORT",
	"PKT2LONG",
};

static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;

	if (int_status & CIO2_INT_IOOE) {
		/*
		 * Interrupt on Output Error:
		 * 1) SRAM is full and FS received, or
		 * 2) An invalid bit detected by DMA.
		 */
		u32 oe_status, oe_clear;

		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
		oe_status = oe_clear;

		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
			dev_err(dev, "DMA output error: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
		}
		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
				>> CIO2_INT_EXT_OE_OES_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
		}
		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
		if (oe_status)
			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
				 oe_status);
		int_status &= ~CIO2_INT_IOOE;
	}

	if (int_status & CIO2_INT_IOC_MASK) {
		/* DMA IO done -- frame ready */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOC(d)) {
				clr |= CIO2_INT_IOC(d);
				cio2_buffer_done(cio2, d);
			}
		int_status &= ~clr;
	}

	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
		/* DMA IO starts or reached specified line */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOS_IOLN(d)) {
				clr |= CIO2_INT_IOS_IOLN(d);
				if (d == CIO2_DMA_CHAN)
					cio2_queue_event_sof(cio2,
							     cio2->cur_queue);
			}
		int_status &= ~clr;
	}

	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
		/* CSI2 receiver (error) interrupt */
		u32 ie_status, ie_clear;
		unsigned int port;

		ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
		ie_status = ie_clear;

		for (port = 0; port < CIO2_NUM_PORTS; port++) {
			u32 port_status = (ie_status >> (port * 8)) & 0xff;
			u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
			void __iomem *const csi_rx_base =
				base + CIO2_REG_PIPE_BASE(port);
			unsigned int i;

			while (port_status & err_mask) {
				i = ffs(port_status) - 1;
				dev_err(dev, "port %i error %s\n",
					port, cio2_port_errs[i]);
				ie_status &= ~BIT(port * 8 + i);
				port_status &= ~BIT(i);
			}

			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
				u32 csi2_status, csi2_clear;

				csi2_status = readl(csi_rx_base +
						CIO2_REG_IRQCTRL_STATUS);
				csi2_clear = csi2_status;
				err_mask =
					BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;

				while (csi2_status & err_mask) {
					i = ffs(csi2_status) - 1;
					dev_err(dev,
						"CSI-2 receiver port %i: %s\n",
						port, cio2_irq_errs[i]);
					csi2_status &= ~BIT(i);
				}

				writel(csi2_clear,
				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
				if (csi2_status)
					dev_warn(dev,
						 "unknown CSI2 error 0x%x on port %i\n",
						 csi2_status, port);

				ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
			}
		}

		writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
		if (ie_status)
			dev_warn(dev, "unknown interrupt 0x%x on IE\n",
				 ie_status);

		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
	}

	if (int_status)
		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
}

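/*
 * Decoding example for the per-port error loop above: a port_status of
 * 0x06 has bits 1 and 2 set, so ffs() picks them off in turn and the
 * log shows "DPHY not recoverable" followed by "ECC not recoverable"
 * for that port (the bit positions index cio2_port_errs[]).
 */
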
static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{
	struct cio2_device *cio2 = cio2_ptr;
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;
	u32 int_status;

	int_status = readl(base + CIO2_REG_INT_STS);
	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
	if (!int_status)
		return IRQ_NONE;

	do {
		writel(int_status, base + CIO2_REG_INT_STS);
		cio2_irq_handle_once(cio2, int_status);
		int_status = readl(base + CIO2_REG_INT_STS);
		if (int_status)
			dev_dbg(dev, "pending status 0x%x\n", int_status);
	} while (int_status);

	return IRQ_HANDLED;
}

/**************** Videobuf2 interface ****************/

static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
					enum vb2_buffer_state state)
{
	unsigned int i;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		if (q->bufs[i]) {
			atomic_dec(&q->bufs_queued);
			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
					state);
		}
	}
}

static int cio2_vb2_queue_setup(struct vb2_queue *vq,
				unsigned int *num_buffers,
				unsigned int *num_planes,
				unsigned int sizes[],
				struct device *alloc_devs[])
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	unsigned int i;

	*num_planes = q->format.num_planes;

	for (i = 0; i < *num_planes; ++i) {
		sizes[i] = q->format.plane_fmt[i].sizeimage;
		alloc_devs[i] = &cio2->pci_dev->dev;
	}

	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);

	/* Initialize buffer queue */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		q->bufs[i] = NULL;
		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
	}
	atomic_set(&q->bufs_queued, 0);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}

/* Called after each buffer is allocated */
static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_buffer *b =
		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
	static const unsigned int entries_per_page =
		CIO2_PAGE_SIZE / sizeof(u32);
	unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
	unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
	struct sg_table *sg;
	struct sg_dma_page_iter sg_iter;
	int i, j;

	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
			vb->planes[0].length);
		return -ENOSPC;		/* Should never happen */
	}

	memset(b->lop, 0, sizeof(b->lop));
	/* Allocate LOP table */
	for (i = 0; i < lops; i++) {
		b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
					       &b->lop_bus_addr[i], GFP_KERNEL);
		if (!b->lop[i])
			goto fail;
	}

	/* Fill LOP */
	sg = vb2_dma_sg_plane_desc(vb, 0);
	if (!sg)
		return -ENOMEM;

	if (sg->nents && sg->sgl)
		b->offset = sg->sgl->offset;

	i = j = 0;
	for_each_sg_dma_page (sg->sgl, &sg_iter, sg->nents, 0) {
		if (!pages--)
			break;
		b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
		j++;
		if (j == entries_per_page) {
			i++;
			j = 0;
		}
	}

	b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
	return 0;
fail:
	for (i--; i >= 0; i--)
		dma_free_coherent(dev, CIO2_PAGE_SIZE,
				  b->lop[i], b->lop_bus_addr[i]);
	return -ENOMEM;
}

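/*
 * Sizing example for the LOP allocation above, assuming the usual
 * 4 KiB CIO2_PAGE_SIZE: a 1920x1080 frame in the packed 10-bit format
 * has a 2496 byte stride (cio2_bytesperline(1920)), i.e. a 2695680
 * byte plane, so pages = DIV_ROUND_UP(2695680, 4096) = 659 and a
 * single LOP of 1024 entries suffices:
 * lops = DIV_ROUND_UP(659 + 1, 1024) = 1.
 */
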
/* Transfer buffer ownership to cio2 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct cio2_queue *q =
		container_of(vb->vb2_queue, struct cio2_queue, vbq);
	struct cio2_buffer *b =
		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
	struct cio2_fbpt_entry *entry;
	unsigned long flags;
	unsigned int i, j, next = q->bufs_next;
	int bufs_queued = atomic_inc_return(&q->bufs_queued);
	u32 fbpt_rp;

	dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);

	/*
	 * This code queues the buffer to the CIO2 DMA engine, which starts
	 * running once streaming has started. It is possible that this code
	 * gets pre-empted due to increased CPU load. Upon this, the driver
	 * does not get an opportunity to queue new buffers to the CIO2 DMA
	 * engine. When the DMA engine encounters an FBPT entry without the
	 * VALID bit set, the DMA engine halts, which requires a restart of
	 * the DMA engine and sensor, to continue streaming.
	 * This is not desired and is highly unlikely given that there are
	 * 32 FBPT entries that the DMA engine needs to process, to run into
	 * an FBPT entry, without the VALID bit set. We try to mitigate this
	 * by disabling interrupts for the duration of this queueing.
	 */
	local_irq_save(flags);

	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
		   & CIO2_CDMARI_FBPT_RP_MASK;

	/*
	 * fbpt_rp is the fbpt entry that the dma is currently working
	 * on, but since it could jump to next entry at any time,
	 * assume that we might already be there.
	 */
	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	if (bufs_queued <= 1 || fbpt_rp == next)
		/* Buffers were drained */
		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		/*
		 * We have allocated CIO2_MAX_BUFFERS circularly for the
		 * hw, the user has requested N buffer queue. The driver
		 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
		 * user queues a buffer, there necessarily is a free buffer.
		 */
		if (!q->bufs[next]) {
			q->bufs[next] = b;
			entry = &q->fbpt[next * CIO2_MAX_LOPS];
			cio2_fbpt_entry_init_buf(cio2, b, entry);
			local_irq_restore(flags);
			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
			for (j = 0; j < vb->num_planes; j++)
				vb2_set_plane_payload(vb, j,
					q->format.plane_fmt[j].sizeimage);
			return;
		}

		dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
		next = (next + 1) % CIO2_MAX_BUFFERS;
	}

	local_irq_restore(flags);
	dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
	atomic_dec(&q->bufs_queued);
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}

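/*
 * Ring walk example for the queueing above, taking CIO2_MAX_BUFFERS to
 * be the 32 entries the comment mentions: if the read pointer register
 * reports entry 5, the driver assumes the DMA may already be on entry
 * 6, and when the queue has drained it restarts filling at entry 7.
 */
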
/* Called when each buffer is freed */
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct cio2_buffer *b =
		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
	unsigned int i;

	/* Free LOP table */
	for (i = 0; i < CIO2_MAX_LOPS; i++) {
		if (b->lop[i])
			dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
					  b->lop[i], b->lop_bus_addr[i]);
	}
}

static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	int r;

	cio2->cur_queue = q;
	atomic_set(&q->frame_sequence, 0);

	r = pm_runtime_get_sync(&cio2->pci_dev->dev);
	if (r < 0) {
		dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
		pm_runtime_put_noidle(&cio2->pci_dev->dev);
		return r;
	}

	r = media_pipeline_start(&q->vdev.entity, &q->pipe);
	if (r)
		goto fail_pipeline;

	r = cio2_hw_init(cio2, q);
	if (r)
		goto fail_hw;

	/* Start streaming on sensor */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
	if (r)
		goto fail_csi2_subdev;

	cio2->streaming = true;

	return 0;

fail_csi2_subdev:
	cio2_hw_exit(cio2, q);
fail_hw:
	media_pipeline_stop(&q->vdev.entity);
fail_pipeline:
	dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
	pm_runtime_put(&cio2->pci_dev->dev);

	return r;
}

static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);

	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
		dev_err(&cio2->pci_dev->dev,
			"failed to stop sensor streaming\n");

	cio2_hw_exit(cio2, q);
	synchronize_irq(cio2->pci_dev->irq);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
	media_pipeline_stop(&q->vdev.entity);
	pm_runtime_put(&cio2->pci_dev->dev);
	cio2->streaming = false;
}

static const struct vb2_ops cio2_vb2_ops = {
	.buf_init = cio2_vb2_buf_init,
	.buf_queue = cio2_vb2_buf_queue,
	.buf_cleanup = cio2_vb2_buf_cleanup,
	.queue_setup = cio2_vb2_queue_setup,
	.start_streaming = cio2_vb2_start_streaming,
	.stop_streaming = cio2_vb2_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/**************** V4L2 interface ****************/

static int cio2_v4l2_querycap(struct file *file, void *fh,
			      struct v4l2_capability *cap)
{
	struct cio2_device *cio2 = video_drvdata(file);

	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "PCI:%s", pci_name(cio2->pci_dev));

	return 0;
}

static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
			      struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	f->pixelformat = formats[f->index].fourcc;

	return 0;
}

/* The format is validated in cio2_video_link_validate() */
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	f->fmt.pix_mp = q->format;

	return 0;
}

static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	const struct ipu3_cio2_fmt *fmt;
	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;

	fmt = cio2_find_format(&mpix->pixelformat, NULL);
	if (!fmt)
		fmt = &formats[0];

	/* Only supports up to 4224x3136 */
	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
		mpix->width = CIO2_IMAGE_MAX_WIDTH;
	if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
		mpix->height = CIO2_IMAGE_MAX_LENGTH;

	mpix->num_planes = 1;
	mpix->pixelformat = fmt->fourcc;
	mpix->colorspace = V4L2_COLORSPACE_RAW;
	mpix->field = V4L2_FIELD_NONE;
	memset(mpix->reserved, 0, sizeof(mpix->reserved));
	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
				       mpix->height;
	memset(mpix->plane_fmt[0].reserved, 0,
	       sizeof(mpix->plane_fmt[0].reserved));

	/* use default */
	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;

	return 0;
}

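/*
 * Worked example at the maximum resolution mentioned above: trying
 * 4224x3136 yields bytesperline = DIV_ROUND_UP(4224, 50) * 64 = 5440
 * and sizeimage = 5440 * 3136 = 17059840 bytes for the single plane.
 */
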
static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	cio2_v4l2_try_fmt(file, fh, f);
	q->format = f->fmt.pix_mp;

	return 0;
}

static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strscpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_file_operations cio2_v4l2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
	.vidioc_querycap = cio2_v4l2_querycap,
	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_enum_input = cio2_video_enum_input,
	.vidioc_g_input = cio2_video_g_input,
	.vidioc_s_input = cio2_video_s_input,
};

static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
				       struct v4l2_fh *fh,
				       struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_FRAME_SYNC)
		return -EINVAL;

	/* Line number. For now only zero accepted. */
	if (sub->id != 0)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, 0, NULL);
}

static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_mbus_framefmt *format;
	const struct v4l2_mbus_framefmt fmt_default = {
		.width = 1936,
		.height = 1096,
		.code = formats[0].mbus_code,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_RAW,
		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
		.quantization = V4L2_QUANTIZATION_DEFAULT,
		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
	};

	/* Initialize try_fmt */
	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
	*format = fmt_default;

	/* same as sink */
	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
	*format = fmt_default;

	return 0;
}

/*
 * cio2_subdev_get_fmt - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad config
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_pad_config *cfg,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
	struct v4l2_subdev_format format;
	int ret;

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
		return 0;
	}

	if (fmt->pad == CIO2_PAD_SINK) {
		format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		/* Query the sensor, not this subdev, to avoid recursion */
		ret = v4l2_subdev_call(q->sensor, pad, get_fmt, NULL,
				       &format);

		if (ret)
			return ret;
		/* update colorspace etc */
		q->subdev_fmt.colorspace = format.format.colorspace;
		q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
		q->subdev_fmt.quantization = format.format.quantization;
		q->subdev_fmt.xfer_func = format.format.xfer_func;
	}

	fmt->format = q->subdev_fmt;

	return 0;
}

/*
 * cio2_subdev_set_fmt - Handle set format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad config
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_pad_config *cfg,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);

	/*
	 * Only allow setting sink pad format;
	 * source always propagates from sink
	 */
	if (fmt->pad == CIO2_PAD_SOURCE)
		return cio2_subdev_get_fmt(sd, cfg, fmt);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		*v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
	} else {
		/* It's the sink, allow changing frame size */
		q->subdev_fmt.width = fmt->format.width;
		q->subdev_fmt.height = fmt->format.height;
		q->subdev_fmt.code = fmt->format.code;
		fmt->format = q->subdev_fmt;
	}

	return 0;
}

static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
				      struct v4l2_subdev_pad_config *cfg,
				      struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	code->code = formats[code->index].mbus_code;

	return 0;
}

static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
						struct v4l2_subdev_format *fmt)
{
	if (is_media_entity_v4l2_subdev(pad->entity)) {
		struct v4l2_subdev *sd =
			media_entity_to_v4l2_subdev(pad->entity);

		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = pad->index;
		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
	}

	return -EINVAL;
}

static int cio2_video_link_validate(struct media_link *link)
{
	struct video_device *vd = container_of(link->sink->entity,
					       struct video_device, entity);
	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
	struct cio2_device *cio2 = video_get_drvdata(vd);
	struct v4l2_subdev_format source_fmt;
	int ret;

	if (!media_entity_remote_pad(link->sink->entity->pads)) {
		dev_info(&cio2->pci_dev->dev,
			 "video node %s pad not connected\n", vd->name);
		return -ENOTCONN;
	}

	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
	if (ret < 0)
		return 0;

	if (source_fmt.format.width != q->format.width ||
	    source_fmt.format.height != q->format.height) {
		dev_err(&cio2->pci_dev->dev,
			"Wrong width or height %ux%u (%ux%u expected)\n",
			q->format.width, q->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
		return -EINVAL;

	return 0;
}

static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
	.subscribe_event = cio2_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
	.open = cio2_subdev_open,
};

static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
	.link_validate = v4l2_subdev_link_validate_default,
	.get_fmt = cio2_subdev_get_fmt,
	.set_fmt = cio2_subdev_set_fmt,
	.enum_mbus_code = cio2_subdev_enum_mbus_code,
};

static const struct v4l2_subdev_ops cio2_subdev_ops = {
	.core = &cio2_subdev_core_ops,
	.pad = &cio2_subdev_pad_ops,
};

/******* V4L2 sub-device asynchronous registration callbacks ***********/

struct sensor_async_subdev {
	struct v4l2_async_subdev asd;
	struct csi2_bus_info csi2;
};

/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd,
			       struct v4l2_async_subdev *asd)
{
	struct cio2_device *cio2 = container_of(notifier,
						struct cio2_device, notifier);
	struct sensor_async_subdev *s_asd = container_of(asd,
						struct sensor_async_subdev,
						asd);
	struct cio2_queue *q;

	if (cio2->queue[s_asd->csi2.port].sensor)
		return -EBUSY;

	q = &cio2->queue[s_asd->csi2.port];

	q->csi2 = s_asd->csi2;
	q->sensor = sd;
	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);

	return 0;
}

/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *sd,
				 struct v4l2_async_subdev *asd)
{
	struct cio2_device *cio2 = container_of(notifier,
						struct cio2_device, notifier);
	struct sensor_async_subdev *s_asd = container_of(asd,
						struct sensor_async_subdev,
						asd);

	cio2->queue[s_asd->csi2.port].sensor = NULL;
}

/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
						notifier);
	struct sensor_async_subdev *s_asd;
	struct v4l2_async_subdev *asd;
	struct cio2_queue *q;
	unsigned int pad;
	int ret;

	list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
		s_asd = container_of(asd, struct sensor_async_subdev, asd);
		q = &cio2->queue[s_asd->csi2.port];

		for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
			if (q->sensor->entity.pads[pad].flags &
						MEDIA_PAD_FL_SOURCE)
				break;

		if (pad == q->sensor->entity.num_pads) {
			dev_err(&cio2->pci_dev->dev,
				"failed to find src pad for %s\n",
				q->sensor->name);
			return -ENXIO;
		}

		ret = media_create_pad_link(
				&q->sensor->entity, pad,
				&q->subdev.entity, CIO2_PAD_SINK,
				0);
		if (ret) {
			dev_err(&cio2->pci_dev->dev,
				"failed to create link for %s\n",
				q->sensor->name);
			return ret;
		}
	}

	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
}

static const struct v4l2_async_notifier_operations cio2_async_ops = {
	.bound = cio2_notifier_bound,
	.unbind = cio2_notifier_unbind,
	.complete = cio2_notifier_complete,
};

static int cio2_parse_firmware(struct cio2_device *cio2)
{
	unsigned int i;
	int ret;

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		struct v4l2_fwnode_endpoint vep = {
			.bus_type = V4L2_MBUS_CSI2_DPHY
		};
		struct sensor_async_subdev *s_asd = NULL;
		struct fwnode_handle *ep;

		ep = fwnode_graph_get_endpoint_by_id(
			dev_fwnode(&cio2->pci_dev->dev), i, 0,
			FWNODE_GRAPH_ENDPOINT_NEXT);
		if (!ep)
			continue;

		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
		if (ret)
			goto err_parse;

		s_asd = kzalloc(sizeof(*s_asd), GFP_KERNEL);
		if (!s_asd) {
			ret = -ENOMEM;
			goto err_parse;
		}

		s_asd->csi2.port = vep.base.port;
		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;

		ret = v4l2_async_notifier_add_fwnode_remote_subdev(
			&cio2->notifier, ep, &s_asd->asd);
		if (ret)
			goto err_parse;

		fwnode_handle_put(ep);

		continue;

err_parse:
		fwnode_handle_put(ep);
		kfree(s_asd);
		return ret;
	}

	/*
	 * Proceed even without sensors connected to allow the device to
	 * suspend.
	 */
	cio2->notifier.ops = &cio2_async_ops;
	ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
	if (ret)
		dev_err(&cio2->pci_dev->dev,
			"failed to register async notifier : %d\n", ret);

	return ret;
}

/**************** Queue initialization ****************/

static const struct media_entity_operations cio2_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

static const struct media_entity_operations cio2_video_entity_ops = {
	.link_validate = cio2_video_link_validate,
};

static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const u32 default_width = 1936;
	static const u32 default_height = 1096;
	const struct ipu3_cio2_fmt dflt_fmt = formats[0];

	struct video_device *vdev = &q->vdev;
	struct vb2_queue *vbq = &q->vbq;
	struct v4l2_subdev *subdev = &q->subdev;
	struct v4l2_mbus_framefmt *fmt;
	int r;

	/* Initialize miscellaneous variables */
	mutex_init(&q->lock);

	/* Initialize formats to default values */
	fmt = &q->subdev_fmt;
	fmt->width = default_width;
	fmt->height = default_height;
	fmt->code = dflt_fmt.mbus_code;
	fmt->field = V4L2_FIELD_NONE;

	q->format.width = default_width;
	q->format.height = default_height;
	q->format.pixelformat = dflt_fmt.fourcc;
	q->format.colorspace = V4L2_COLORSPACE_RAW;
	q->format.field = V4L2_FIELD_NONE;
	q->format.num_planes = 1;
	q->format.plane_fmt[0].bytesperline =
				cio2_bytesperline(q->format.width);
	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
						q->format.height;

	/* Initialize fbpt */
	r = cio2_fbpt_init(cio2, q);
	if (r)
		goto fail_fbpt;

	/* Initialize media entities */
	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
		MEDIA_PAD_FL_MUST_CONNECT;
	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	subdev->entity.ops = &cio2_media_ops;
	subdev->internal_ops = &cio2_subdev_internal_ops;
	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed initialize subdev media entity (%d)\n", r);
		goto fail_subdev_media_entity;
	}

	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	vdev->entity.ops = &cio2_video_entity_ops;
	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed initialize videodev media entity (%d)\n", r);
		goto fail_vdev_media_entity;
	}

	/* Initialize subdev */
	v4l2_subdev_init(subdev, &cio2_subdev_ops);
	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	subdev->owner = THIS_MODULE;
	snprintf(subdev->name, sizeof(subdev->name),
		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	v4l2_set_subdevdata(subdev, cio2);
	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed initialize subdev (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vbq */
	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
	vbq->ops = &cio2_vb2_ops;
	vbq->mem_ops = &vb2_dma_sg_memops;
	vbq->buf_struct_size = sizeof(struct cio2_buffer);
	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	vbq->min_buffers_needed = 1;
	vbq->drv_priv = cio2;
	vbq->lock = &q->lock;
	r = vb2_queue_init(vbq);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed to initialize videobuf2 queue (%d)\n", r);
		goto fail_vbq;
	}

	/* Initialize vdev */
	snprintf(vdev->name, sizeof(vdev->name),
		 "%s %td", CIO2_NAME, q - cio2->queue);
	vdev->release = video_device_release_empty;
	vdev->fops = &cio2_v4l2_fops;
	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
	vdev->lock = &cio2->lock;
	vdev->v4l2_dev = &cio2->v4l2_dev;
	vdev->queue = &q->vbq;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, cio2);
	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed to register video device (%d)\n", r);
		goto fail_vdev;
	}

	/* Create link from CIO2 subdev to output node */
	r = media_create_pad_link(
		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
	if (r)
		goto fail_link;

	return 0;

fail_link:
	video_unregister_device(&q->vdev);
fail_vdev:
	vb2_queue_release(vbq);
fail_vbq:
	v4l2_device_unregister_subdev(subdev);
fail_subdev:
	media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
	media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
fail_fbpt:
	mutex_destroy(&q->lock);

	return r;
}

static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	video_unregister_device(&q->vdev);
	media_entity_cleanup(&q->vdev.entity);
	vb2_queue_release(&q->vbq);
	v4l2_device_unregister_subdev(&q->subdev);
	media_entity_cleanup(&q->subdev.entity);
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
	mutex_destroy(&q->lock);
}

static int cio2_queues_init(struct cio2_device *cio2)
{
	int i, r;

	for (i = 0; i < CIO2_QUEUES; i++) {
		r = cio2_queue_init(cio2, &cio2->queue[i]);
		if (r)
			break;
	}

	if (i == CIO2_QUEUES)
		return 0;

	for (i--; i >= 0; i--)
		cio2_queue_exit(cio2, &cio2->queue[i]);

	return r;
}

static void cio2_queues_exit(struct cio2_device *cio2)
{
	unsigned int i;

	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
}

/**************** PCI interface ****************/

static int cio2_pci_config_setup(struct pci_dev *dev)
{
	u16 pci_command;
	int r = pci_enable_msi(dev);

	if (r) {
		dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
		return r;
	}

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, pci_command);

	return 0;
}

static int cio2_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct cio2_device *cio2;
	void __iomem *const *iomap;
	int r;

	cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
	if (!cio2)
		return -ENOMEM;
	cio2->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
		return -ENODEV;
	}

	iomap = pcim_iomap_table(pci_dev);
	if (!iomap) {
		dev_err(&pci_dev->dev, "failed to iomap table\n");
		return -ENODEV;
	}

	cio2->base = iomap[CIO2_PCI_BAR];

	pci_set_drvdata(pci_dev, cio2);

	pci_set_master(pci_dev);

	r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
	if (r) {
		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
		return -ENODEV;
	}

	r = cio2_pci_config_setup(pci_dev);
	if (r)
		return -ENODEV;

	r = cio2_fbpt_init_dummy(cio2);
	if (r)
		return r;

	mutex_init(&cio2->lock);

	cio2->media_dev.dev = &cio2->pci_dev->dev;
	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
		sizeof(cio2->media_dev.model));
	snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
		 "PCI:%s", pci_name(cio2->pci_dev));
	cio2->media_dev.hw_revision = 0;

	media_device_init(&cio2->media_dev);
	r = media_device_register(&cio2->media_dev);
	if (r < 0)
		goto fail_mutex_destroy;

	cio2->v4l2_dev.mdev = &cio2->media_dev;
	r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
	if (r) {
		dev_err(&pci_dev->dev,
			"failed to register V4L2 device (%d)\n", r);
		goto fail_media_device_unregister;
	}

	r = cio2_queues_init(cio2);
	if (r)
		goto fail_v4l2_device_unregister;

	v4l2_async_notifier_init(&cio2->notifier);

	/* Register notifier for subdevices we care about */
	r = cio2_parse_firmware(cio2);
	if (r)
		goto fail_clean_notifier;

	r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
			     IRQF_SHARED, CIO2_NAME, cio2);
	if (r) {
		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
		goto fail_clean_notifier;
	}

	pm_runtime_put_noidle(&pci_dev->dev);
	pm_runtime_allow(&pci_dev->dev);

	return 0;

fail_clean_notifier:
	v4l2_async_notifier_unregister(&cio2->notifier);
	v4l2_async_notifier_cleanup(&cio2->notifier);
	cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
	v4l2_device_unregister(&cio2->v4l2_dev);
fail_media_device_unregister:
	media_device_unregister(&cio2->media_dev);
	media_device_cleanup(&cio2->media_dev);
fail_mutex_destroy:
	mutex_destroy(&cio2->lock);
	cio2_fbpt_exit_dummy(cio2);

	return r;
}

static void cio2_pci_remove(struct pci_dev *pci_dev)
{
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);

	media_device_unregister(&cio2->media_dev);
	v4l2_async_notifier_unregister(&cio2->notifier);
	v4l2_async_notifier_cleanup(&cio2->notifier);
	cio2_queues_exit(cio2);
	cio2_fbpt_exit_dummy(cio2);
	v4l2_device_unregister(&cio2->v4l2_dev);
	media_device_cleanup(&cio2->media_dev);
	mutex_destroy(&cio2->lock);
}

static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;
	u16 pm;

	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime suspend.\n");

	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
	pm |= CIO2_PMCSR_D3;
	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

	return 0;
}

static int __maybe_unused cio2_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;
	u16 pm;

	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime resume.\n");

	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

	return 0;
}

/*
 * Helper function to advance all the elements of a circular buffer by "start"
 * positions
 */
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{
	struct {
		size_t begin, end;
	} arr[2] = {
		{ 0, start - 1 },
		{ start, elems - 1 },
	};

#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)

	/* Loop as long as we have out-of-place entries */
	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
		size_t size0, i;

		/*
		 * Find the number of entries that can be arranged on this
		 * iteration.
		 */
		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));

		/* Swap the entries in two parts of the array. */
		for (i = 0; i < size0; i++) {
			u8 *d = ptr + elem_size * (arr[1].begin + i);
			u8 *s = ptr + elem_size * (arr[0].begin + i);
			size_t j;

			for (j = 0; j < elem_size; j++)
				swap(d[j], s[j]);
		}

		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
			/* The end of the first array remains unarranged. */
			arr[0].begin += size0;
		} else {
			/*
			 * The first array is fully arranged so we proceed
			 * handling the next one.
			 */
			arr[0].begin = arr[1].begin;
			arr[0].end = arr[1].begin + size0 - 1;
			arr[1].begin += size0;
		}
	}
}

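/*
 * Rotation trace for arrange() with a five element array and start = 2:
 * [A B C D E] becomes [C D A B E] after the first pass, [C D E B A]
 * after the second and [C D E A B] after the third, i.e. the buffer has
 * been rotated left by two positions.
 */
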
static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{
	unsigned int i, j;

	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
		i++, j = (j + 1) % CIO2_MAX_BUFFERS)
		if (q->bufs[j])
			break;

	if (i == CIO2_MAX_BUFFERS)
		return;

	if (j) {
		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
			CIO2_MAX_BUFFERS, j);
		arrange(q->bufs, sizeof(struct cio2_buffer *),
			CIO2_MAX_BUFFERS, j);
	}

	/*
	 * DMA clears the valid bit when accessing the buffer.
	 * When stopping stream in suspend callback, some of the buffers
	 * may be in invalid state. After resume, when DMA meets the invalid
	 * buffer, it will halt and stop receiving new data.
	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
	 */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
}

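/*
 * Example for the relocation above: if the oldest queued buffer sits at
 * slot 5 when the stream is suspended (bufs_first == 5), both q->fbpt
 * and q->bufs are rotated so that buffer lands at slot 0, matching the
 * DMA engine restarting from FBPT entry 0 on resume; the loop then
 * re-sets the VALID bit on every entry.
 */
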
static int __maybe_unused cio2_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	struct cio2_queue *q = cio2->cur_queue;

	dev_dbg(dev, "cio2 suspend\n");
	if (!cio2->streaming)
		return 0;

	/* Stop stream */
	cio2_hw_exit(cio2, q);
	synchronize_irq(pci_dev->irq);

	pm_runtime_force_suspend(dev);

	/*
	 * Upon resume, hw starts to process the fbpt entries from beginning,
	 * so relocate the queued buffs to the fbpt head before suspend.
	 */
	cio2_fbpt_rearrange(cio2, q);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}

static int __maybe_unused cio2_resume(struct device *dev)
{
	struct cio2_device *cio2 = dev_get_drvdata(dev);
	int r;
	struct cio2_queue *q = cio2->cur_queue;

	dev_dbg(dev, "cio2 resume\n");
	if (!cio2->streaming)
		return 0;
	/* Start stream */
	r = pm_runtime_force_resume(&cio2->pci_dev->dev);
	if (r < 0) {
		dev_err(&cio2->pci_dev->dev,
			"failed to set power %d\n", r);
		return r;
	}

	r = cio2_hw_init(cio2, q);
	if (r)
		dev_err(dev, "failed to init cio2 hw\n");

	return r;
}

static const struct dev_pm_ops cio2_pm_ops = {
	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
};

static const struct pci_device_id cio2_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);

static struct pci_driver cio2_pci_driver = {
	.name = CIO2_NAME,
	.id_table = cio2_pci_id_table,
	.probe = cio2_pci_probe,
	.remove = cio2_pci_remove,
	.driver = {
		.pm = &cio2_pm_ops,
	},
};

module_pci_driver(cio2_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");