// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - 2018 Intel Corporation
 * Copyright 2017 Google LLC
 *
 * Based on Intel IPU4 driver.
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include "ipu3.h"
#include "ipu3-dmamap.h"
#include "ipu3-mmu.h"

#define IMGU_PCI_ID                     0x1919
#define IMGU_PCI_BAR                    0
#define IMGU_DMA_MASK                   DMA_BIT_MASK(39)
#define IMGU_MAX_QUEUE_DEPTH            (2 + 2)

/*
 * Pre-allocated buffer sizes for IMGU dummy buffers. These values
 * should be tuned large enough to avoid buffer re-allocation while
 * streaming, in order to keep streaming latency low.
 */
#define CSS_QUEUE_IN_BUF_SIZE           0
#define CSS_QUEUE_PARAMS_BUF_SIZE       0
#define CSS_QUEUE_OUT_BUF_SIZE          (4160 * 3120 * 12 / 8)
#define CSS_QUEUE_VF_BUF_SIZE           (1920 * 1080 * 12 / 8)
#define CSS_QUEUE_STAT_3A_BUF_SIZE      sizeof(struct ipu3_uapi_stats_3a)

static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
        [IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
        [IPU3_CSS_QUEUE_PARAMS] = CSS_QUEUE_PARAMS_BUF_SIZE,
        [IPU3_CSS_QUEUE_OUT] = CSS_QUEUE_OUT_BUF_SIZE,
        [IPU3_CSS_QUEUE_VF] = CSS_QUEUE_VF_BUF_SIZE,
        [IPU3_CSS_QUEUE_STAT_3A] = CSS_QUEUE_STAT_3A_BUF_SIZE,
};

static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
        [IMGU_NODE_IN] = {IPU3_CSS_QUEUE_IN, "input"},
        [IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
        [IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
        [IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
        [IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
};

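/* Map a V4L2 video node index to the corresponding CSS hardware queue */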
unsigned int imgu_node_to_queue(unsigned int node)
{
        return imgu_node_map[node].css_queue;
}

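/*
 * Map a CSS queue back to its video node. Returns IMGU_NODE_NUM when no
 * node is associated with the given queue.
 */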
unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
{
        unsigned int i;

        for (i = 0; i < IMGU_NODE_NUM; i++)
                if (imgu_node_map[i].css_queue == css_queue)
                        break;

        return i;
}

/**************** Dummy buffers ****************/

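/*
 * Dummy buffers are driver-owned scratch buffers handed to the CSS
 * firmware whenever a queue is enabled but no real buffer from user
 * space is available. This keeps the pipeline running; data landing in
 * a dummy buffer is simply discarded when the buffer is dequeued again.
 */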
static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IPU3_CSS_QUEUES; i++)
                imgu_dmamap_free(imgu,
                                 &imgu_pipe->queues[i].dmap);
}

static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
                                      unsigned int pipe)
{
        unsigned int i;
        size_t size;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IPU3_CSS_QUEUES; i++) {
                size = css_queue_buf_size_map[i];
                /*
                 * Do not enable dummy buffers for the master queue;
                 * always require that real buffers from the user are
                 * available.
                 */
                if (i == IMGU_QUEUE_MASTER || size == 0)
                        continue;

                if (!imgu_dmamap_alloc(imgu,
                                       &imgu_pipe->queues[i].dmap, size)) {
                        imgu_dummybufs_cleanup(imgu, pipe);
                        return -ENOMEM;
                }
        }

        return 0;
}

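/*
 * Resize the pre-allocated dummy buffers to match the currently
 * configured formats and (re-)initialize a CSS buffer for each queue
 * slot, so they are ready to be handed to the firmware.
 */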
static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
{
        const struct v4l2_pix_format_mplane *mpix;
        const struct v4l2_meta_format *meta;
        unsigned int i, k, node;
        size_t size;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        /* Allocate a dummy buffer for each queue where a buffer is optional */
        for (i = 0; i < IPU3_CSS_QUEUES; i++) {
                node = imgu_map_node(imgu, i);
                if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
                        continue;

                if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
                    i == IPU3_CSS_QUEUE_VF)
                        /*
                         * Do not enable dummy buffers for VF if it is not
                         * requested by the user.
                         */
                        continue;

                meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
                mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;

                if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
                        size = meta->buffersize;
                else
                        size = mpix->plane_fmt[0].sizeimage;

                if (imgu_css_dma_buffer_resize(imgu,
                                               &imgu_pipe->queues[i].dmap,
                                               size)) {
                        imgu_dummybufs_cleanup(imgu, pipe);
                        return -ENOMEM;
                }

                for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
                        imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
                                          imgu_pipe->queues[i].dmap.daddr);
        }

        return 0;
}

/* May be called from atomic context */
static struct imgu_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
                                                  int queue, unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        /* Dummy buffers are not allocated for the master queue */
        if (queue == IPU3_CSS_QUEUE_IN)
                return NULL;

        if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
                /* The buffer should have been allocated before this point */
                return NULL;

        for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
                if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
                    IPU3_CSS_BUFFER_QUEUED)
                        break;

        if (i == IMGU_MAX_QUEUE_DEPTH)
                return NULL;

        imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
                          imgu_pipe->queues[queue].dmap.daddr);

        return &imgu_pipe->queues[queue].dummybufs[i];
}

/* Check if the given buffer is a dummy buffer */
static bool imgu_dummybufs_check(struct imgu_device *imgu,
                                 struct imgu_css_buffer *buf,
                                 unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
                if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
                        break;

        return i < IMGU_MAX_QUEUE_DEPTH;
}

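/* Complete a vb2 buffer while serializing against the rest of the driver */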
static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
                             enum vb2_buffer_state state)
{
        mutex_lock(&imgu->lock);
        imgu_v4l2_buffer_done(vb, state);
        mutex_unlock(&imgu->lock);
}

static struct imgu_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
                                                 unsigned int node,
                                                 unsigned int pipe)
{
        struct imgu_buffer *buf;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        if (WARN_ON(node >= IMGU_NODE_NUM))
                return NULL;

        /* Find the first free buffer from the node */
        list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
                if (imgu_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
                        return &buf->css_buf;
        }

        /* There were no free buffers, try to return a dummy buffer */
        return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
}

/*
 * Queue as many buffers to CSS as possible. If not all buffers fit into
 * the CSS buffer queues, they remain unqueued and will be queued later.
 */
int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe)
{
        unsigned int node;
        int r = 0;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        if (!imgu_css_is_streaming(&imgu->css))
                return 0;

        dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
        mutex_lock(&imgu->lock);

        if (!imgu_css_pipe_queue_empty(&imgu->css, pipe)) {
                mutex_unlock(&imgu->lock);
                return 0;
        }

        /* A buffer set is queued to the FW only when an input buffer is ready */
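        /*
         * The loop below walks the nodes in descending order, wrapping
         * around, and keeps going for as long as a real or dummy input
         * buffer can be obtained, so that one buffer per enabled queue
         * is submitted for each input frame.
         */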
        for (node = IMGU_NODE_NUM - 1;
             imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
             node = node ? node - 1 : IMGU_NODE_NUM - 1) {
                if (node == IMGU_NODE_VF &&
                    !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
                        dev_warn(&imgu->pci_dev->dev,
                                 "VF not enabled, ignoring queue");
                        continue;
                } else if (node == IMGU_NODE_PARAMS &&
                           imgu_pipe->nodes[node].enabled) {
                        struct vb2_buffer *vb;
                        struct imgu_vb2_buffer *ivb;

                        /* No parameters for this frame */
                        if (list_empty(&imgu_pipe->nodes[node].buffers))
                                continue;

                        ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
                                               struct imgu_vb2_buffer, list);
                        vb = &ivb->vbb.vb2_buf;
                        r = imgu_css_set_parameters(&imgu->css, pipe,
                                                    vb2_plane_vaddr(vb, 0));
                        if (r) {
                                vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
                                dev_warn(&imgu->pci_dev->dev,
                                         "set parameters failed.");
                                continue;
                        }

                        vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
                        dev_dbg(&imgu->pci_dev->dev,
                                "queue user parameters %d to css.", vb->index);
                        list_del(&ivb->list);
                } else if (imgu_pipe->queue_enabled[node]) {
                        struct imgu_css_buffer *buf =
                                imgu_queue_getbuf(imgu, node, pipe);
                        struct imgu_buffer *ibuf = NULL;
                        bool dummy;

                        if (!buf)
                                break;

                        r = imgu_css_buf_queue(&imgu->css, pipe, buf);
                        if (r)
                                break;
                        dummy = imgu_dummybufs_check(imgu, buf, pipe);
                        if (!dummy)
                                ibuf = container_of(buf, struct imgu_buffer,
                                                    css_buf);
                        dev_dbg(&imgu->pci_dev->dev,
                                "queue %s %s buffer %u to css da: 0x%08x\n",
                                dummy ? "dummy" : "user",
                                imgu_node_map[node].name,
                                dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
                                (u32)buf->daddr);
                }
        }
        mutex_unlock(&imgu->lock);

        if (r && r != -EBUSY)
                goto failed;

        return 0;

failed:
        /*
         * On error, mark all buffers that have not yet been queued to
         * CSS as failed.
         */
        dev_err(&imgu->pci_dev->dev,
                "failed to queue buffer to CSS on queue %i (%d)\n",
                node, r);

        if (initial)
                /* If we were called from streamon(), no need to finish bufs */
                return r;

        for (node = 0; node < IMGU_NODE_NUM; node++) {
                struct imgu_buffer *buf, *buf0;

                if (!imgu_pipe->queue_enabled[node])
                        continue;       /* Skip disabled queues */

                mutex_lock(&imgu->lock);
                list_for_each_entry_safe(buf, buf0,
                                         &imgu_pipe->nodes[node].buffers,
                                         vid_buf.list) {
                        if (imgu_css_buf_state(&buf->css_buf) ==
                            IPU3_CSS_BUFFER_QUEUED)
                                continue;       /* Was already queued, skip */

                        imgu_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
                                              VB2_BUF_STATE_ERROR);
                }
                mutex_unlock(&imgu->lock);
        }

        return r;
}

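/*
 * Power up the ImgU: the CSS hardware is powered up first and the MMU is
 * resumed afterwards, presumably because the MMU is only accessible
 * while the hardware is powered.
 */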
static int imgu_powerup(struct imgu_device *imgu)
{
        int r;

        r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base);
        if (r)
                return r;

        imgu_mmu_resume(imgu->mmu);
        return 0;
}

static void imgu_powerdown(struct imgu_device *imgu)
{
        imgu_mmu_suspend(imgu->mmu);
        imgu_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
}

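/*
 * Start or stop streaming. On stream off, new buffer queueing is blocked
 * with qbuf_barrier and any in-flight interrupt handler is waited for
 * before the hardware is powered down. On stream on, the device is
 * powered up, CSS streaming is started and the initial buffers are
 * queued to each enabled pipe.
 */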
int imgu_s_stream(struct imgu_device *imgu, int enable)
{
        struct device *dev = &imgu->pci_dev->dev;
        int r, pipe;

        if (!enable) {
                /* Stop streaming */
                dev_dbg(dev, "stream off\n");
                /* Block new buffers from being queued to CSS. */
                atomic_set(&imgu->qbuf_barrier, 1);
                imgu_css_stop_streaming(&imgu->css);
                synchronize_irq(imgu->pci_dev->irq);
                atomic_set(&imgu->qbuf_barrier, 0);
                imgu_powerdown(imgu);
                pm_runtime_put(&imgu->pci_dev->dev);

                return 0;
        }

        /* Set power */
        r = pm_runtime_get_sync(dev);
        if (r < 0) {
                dev_err(dev, "failed to set imgu power\n");
                pm_runtime_put(dev);
                return r;
        }

        r = imgu_powerup(imgu);
        if (r) {
                dev_err(dev, "failed to power up imgu\n");
                pm_runtime_put(dev);
                return r;
        }

        /* Start CSS streaming */
        r = imgu_css_start_streaming(&imgu->css);
        if (r) {
                dev_err(dev, "failed to start css streaming (%d)", r);
                goto fail_start_streaming;
        }

        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                /* Initialize dummy buffers */
                r = imgu_dummybufs_init(imgu, pipe);
                if (r) {
                        dev_err(dev, "failed to initialize dummy buffers (%d)", r);
                        goto fail_dummybufs;
                }

                /* Queue as many buffers from the queue as possible */
                r = imgu_queue_buffers(imgu, true, pipe);
                if (r) {
                        dev_err(dev, "failed to queue initial buffers (%d)", r);
                        goto fail_queueing;
                }
        }

        return 0;
fail_queueing:
        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
                imgu_dummybufs_cleanup(imgu, pipe);
fail_dummybufs:
        imgu_css_stop_streaming(&imgu->css);
fail_start_streaming:
        pm_runtime_put(dev);

        return r;
}

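/*
 * Initialize and register the V4L2 video nodes, set the initial formats
 * on each pipe and pre-allocate the dummy buffers. On failure, whatever
 * was set up is torn down again.
 */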
static int imgu_video_nodes_init(struct imgu_device *imgu)
{
        struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
        struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
        struct imgu_media_pipe *imgu_pipe;
        unsigned int i, j;
        int r;

        imgu->buf_struct_size = sizeof(struct imgu_buffer);

        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
                imgu_pipe = &imgu->imgu_pipe[j];

                for (i = 0; i < IMGU_NODE_NUM; i++) {
                        imgu_pipe->nodes[i].name = imgu_node_map[i].name;
                        imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
                        imgu_pipe->nodes[i].enabled = false;

                        if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
                                fmts[imgu_node_map[i].css_queue] =
                                        &imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
                        atomic_set(&imgu_pipe->nodes[i].sequence, 0);
                }
        }

        r = imgu_v4l2_register(imgu);
        if (r)
                return r;

        /* Set the initial formats of the video nodes */
        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
                imgu_pipe = &imgu->imgu_pipe[j];

                rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
                rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
                imgu_css_fmt_set(&imgu->css, fmts, rects, j);

                /* Pre-allocate dummy buffers */
                r = imgu_dummybufs_preallocate(imgu, j);
                if (r) {
                        dev_err(&imgu->pci_dev->dev,
                                "failed to pre-allocate dummy buffers (%d)", r);
                        goto out_cleanup;
                }
        }

        return 0;

out_cleanup:
        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++)
                imgu_dummybufs_cleanup(imgu, j);

        imgu_v4l2_unregister(imgu);

        return r;
}

static void imgu_video_nodes_exit(struct imgu_device *imgu)
{
        int i;

        for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
                imgu_dummybufs_cleanup(imgu, i);

        imgu_v4l2_unregister(imgu);
}

/**************** PCI interface ****************/

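/*
 * Threaded half of the interrupt handler: dequeue all completed buffers
 * from CSS, complete the corresponding vb2 buffers, and then try to feed
 * further buffers to CSS.
 */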
static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
{
        struct imgu_device *imgu = imgu_ptr;
        struct imgu_media_pipe *imgu_pipe;
        int p;

        /* Dequeue / queue buffers */
        do {
                u64 ns = ktime_get_ns();
                struct imgu_css_buffer *b;
                struct imgu_buffer *buf = NULL;
                unsigned int node, pipe;
                bool dummy;

                do {
                        mutex_lock(&imgu->lock);
                        b = imgu_css_buf_dequeue(&imgu->css);
                        mutex_unlock(&imgu->lock);
                } while (PTR_ERR(b) == -EAGAIN);

                if (IS_ERR(b)) {
                        if (PTR_ERR(b) != -EBUSY)       /* All done */
                                dev_err(&imgu->pci_dev->dev,
                                        "failed to dequeue buffers (%ld)\n",
                                        PTR_ERR(b));
                        break;
                }

                node = imgu_map_node(imgu, b->queue);
                pipe = b->pipe;
                dummy = imgu_dummybufs_check(imgu, b, pipe);
                if (!dummy)
                        buf = container_of(b, struct imgu_buffer, css_buf);
                dev_dbg(&imgu->pci_dev->dev,
                        "dequeue %s %s buffer %d daddr 0x%x from css\n",
                        dummy ? "dummy" : "user",
                        imgu_node_map[node].name,
                        dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
                        (u32)b->daddr);

                if (dummy)
                        /* It was a dummy buffer, skip it */
                        continue;

                /* Fill in the vb2 buffer entries and mark the buffer ready */
                imgu_pipe = &imgu->imgu_pipe[pipe];
                if (!imgu_pipe->nodes[node].output) {
                        buf->vid_buf.vbb.vb2_buf.timestamp = ns;
                        buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
                        buf->vid_buf.vbb.sequence =
                                atomic_inc_return(
                                &imgu_pipe->nodes[node].sequence);
                        dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
                                buf->vid_buf.vbb.sequence);
                }
                imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
                                 imgu_css_buf_state(&buf->css_buf) ==
                                                    IPU3_CSS_BUFFER_DONE ?
                                                    VB2_BUF_STATE_DONE :
                                                    VB2_BUF_STATE_ERROR);
                mutex_lock(&imgu->lock);
                if (imgu_css_queue_empty(&imgu->css))
                        wake_up_all(&imgu->buf_drain_wq);
                mutex_unlock(&imgu->lock);
        } while (1);

        /*
         * Try to queue more buffers for CSS. qbuf_barrier is used to
         * prevent new buffers from being queued to CSS.
         */
        if (!atomic_read(&imgu->qbuf_barrier))
                for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
                        imgu_queue_buffers(imgu, false, p);

        return IRQ_HANDLED;
}

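/*
 * Hard interrupt half: acknowledge the interrupt at the hardware and, if
 * it was ours, defer the actual work to the threaded handler above.
 */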
static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
{
        struct imgu_device *imgu = imgu_ptr;

        /* Acknowledge the interrupt */
        if (imgu_css_irq_ack(&imgu->css) < 0)
                return IRQ_NONE;

        return IRQ_WAKE_THREAD;
}

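/* Enable MSI and bus mastering, and disable legacy INTx interrupts */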
static int imgu_pci_config_setup(struct pci_dev *dev)
{
        u16 pci_command;
        int r = pci_enable_msi(dev);

        if (r) {
                dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
                return r;
        }

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                        PCI_COMMAND_INTX_DISABLE;
        pci_write_config_word(dev, PCI_COMMAND, pci_command);

        return 0;
}

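/*
 * Probe: enable the PCI device, map the register BAR, set the 39-bit DMA
 * mask, configure MSI, power up CSS, and bring up the MMU, the DMA
 * mapping layer, the CSS and the V4L2 video nodes before requesting the
 * interrupt and enabling runtime PM.
 */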
static int imgu_pci_probe(struct pci_dev *pci_dev,
                          const struct pci_device_id *id)
{
        struct imgu_device *imgu;
        phys_addr_t phys;
        unsigned long phys_len;
        void __iomem *const *iomap;
        int r;

        imgu = devm_kzalloc(&pci_dev->dev, sizeof(*imgu), GFP_KERNEL);
        if (!imgu)
                return -ENOMEM;

        imgu->pci_dev = pci_dev;

        r = pcim_enable_device(pci_dev);
        if (r) {
                dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
                return r;
        }

        dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
                 pci_dev->device, pci_dev->revision);

        phys = pci_resource_start(pci_dev, IMGU_PCI_BAR);
        phys_len = pci_resource_len(pci_dev, IMGU_PCI_BAR);

        r = pcim_iomap_regions(pci_dev, 1 << IMGU_PCI_BAR, pci_name(pci_dev));
        if (r) {
                dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
                return r;
        }
        dev_info(&pci_dev->dev, "physical base address %pap, %lu bytes\n",
                 &phys, phys_len);

        iomap = pcim_iomap_table(pci_dev);
        if (!iomap) {
                dev_err(&pci_dev->dev, "failed to iomap table\n");
                return -ENODEV;
        }

        imgu->base = iomap[IMGU_PCI_BAR];

        pci_set_drvdata(pci_dev, imgu);

        pci_set_master(pci_dev);

        r = dma_coerce_mask_and_coherent(&pci_dev->dev, IMGU_DMA_MASK);
        if (r) {
                dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
                return -ENODEV;
        }

        r = imgu_pci_config_setup(pci_dev);
        if (r)
                return r;

        mutex_init(&imgu->lock);
        atomic_set(&imgu->qbuf_barrier, 0);
        init_waitqueue_head(&imgu->buf_drain_wq);

        r = imgu_css_set_powerup(&pci_dev->dev, imgu->base);
        if (r) {
                dev_err(&pci_dev->dev,
                        "failed to power up CSS (%d)\n", r);
                goto out_mutex_destroy;
        }

        imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
        if (IS_ERR(imgu->mmu)) {
                r = PTR_ERR(imgu->mmu);
                dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
                goto out_css_powerdown;
        }

        r = imgu_dmamap_init(imgu);
        if (r) {
                dev_err(&pci_dev->dev,
                        "failed to initialize DMA mapping (%d)\n", r);
                goto out_mmu_exit;
        }

        /* ISP programming */
        r = imgu_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
        if (r) {
                dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
                goto out_dmamap_exit;
        }

        /* v4l2 sub-device registration */
        r = imgu_video_nodes_init(imgu);
        if (r) {
                dev_err(&pci_dev->dev, "failed to create V4L2 devices (%d)\n",
                        r);
                goto out_css_cleanup;
        }

        r = devm_request_threaded_irq(&pci_dev->dev, pci_dev->irq,
                                      imgu_isr, imgu_isr_threaded,
                                      IRQF_SHARED, IMGU_NAME, imgu);
        if (r) {
                dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
                goto out_video_exit;
        }

        pm_runtime_put_noidle(&pci_dev->dev);
        pm_runtime_allow(&pci_dev->dev);

        return 0;

out_video_exit:
        imgu_video_nodes_exit(imgu);
out_css_cleanup:
        imgu_css_cleanup(&imgu->css);
out_dmamap_exit:
        imgu_dmamap_exit(imgu);
out_mmu_exit:
        imgu_mmu_exit(imgu->mmu);
out_css_powerdown:
        imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
out_mutex_destroy:
        mutex_destroy(&imgu->lock);

        return r;
}

static void imgu_pci_remove(struct pci_dev *pci_dev)
{
        struct imgu_device *imgu = pci_get_drvdata(pci_dev);

        pm_runtime_forbid(&pci_dev->dev);
        pm_runtime_get_noresume(&pci_dev->dev);

        imgu_video_nodes_exit(imgu);
        imgu_css_cleanup(&imgu->css);
        imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
        imgu_dmamap_exit(imgu);
        imgu_mmu_exit(imgu->mmu);
        mutex_destroy(&imgu->lock);
}

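/*
 * System suspend: if streaming, block further buffer queueing, wait for
 * the firmware to return all buffers it holds, stop CSS streaming and
 * power the hardware down. Streaming is restarted in imgu_resume().
 */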
static int __maybe_unused imgu_suspend(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct imgu_device *imgu = pci_get_drvdata(pci_dev);

        dev_dbg(dev, "enter %s\n", __func__);
        imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
        if (!imgu->suspend_in_stream)
                goto out;
        /* Block new buffers from being queued to CSS. */
        atomic_set(&imgu->qbuf_barrier, 1);
        /*
         * Wait for the currently running irq handler to finish so that
         * no new buffers will be queued to the FW afterwards.
         */
        synchronize_irq(pci_dev->irq);
        /* Wait until all buffers in CSS are done. */
        if (!wait_event_timeout(imgu->buf_drain_wq,
            imgu_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
                dev_err(dev, "wait buffer drain timeout.\n");

        imgu_css_stop_streaming(&imgu->css);
        atomic_set(&imgu->qbuf_barrier, 0);
        imgu_powerdown(imgu);
        pm_runtime_force_suspend(dev);
out:
        dev_dbg(dev, "leave %s\n", __func__);
        return 0;
}

static int __maybe_unused imgu_resume(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct imgu_device *imgu = pci_get_drvdata(pci_dev);
        int r = 0;
        unsigned int pipe;

        dev_dbg(dev, "enter %s\n", __func__);

        if (!imgu->suspend_in_stream)
                goto out;

        pm_runtime_force_resume(dev);

        r = imgu_powerup(imgu);
        if (r) {
                dev_err(dev, "failed to power up imgu\n");
                goto out;
        }

        /* Start CSS streaming */
        r = imgu_css_start_streaming(&imgu->css);
        if (r) {
                dev_err(dev, "failed to resume css streaming (%d)", r);
                goto out;
        }

        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                r = imgu_queue_buffers(imgu, true, pipe);
                if (r)
                        dev_err(dev, "failed to queue buffers to pipe %d (%d)",
                                pipe, r);
        }

out:
        dev_dbg(dev, "leave %s\n", __func__);

        return r;
}

/*
 * The PCI runtime PM framework checks for the existence of driver
 * runtime PM callbacks. Provide a dummy callback here to avoid runtime
 * PM going into an error state.
 */
static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops imgu_pm_ops = {
        SET_RUNTIME_PM_OPS(&imgu_rpm_dummy_cb, &imgu_rpm_dummy_cb, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(&imgu_suspend, &imgu_resume)
};

static const struct pci_device_id imgu_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, IMGU_PCI_ID) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, imgu_pci_tbl);

static struct pci_driver imgu_pci_driver = {
        .name = IMGU_NAME,
        .id_table = imgu_pci_tbl,
        .probe = imgu_pci_probe,
        .remove = imgu_pci_remove,
        .driver = {
                .pm = &imgu_pm_ops,
        },
};

module_pci_driver(imgu_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel ipu3_imgu PCI driver");