drivers/media/pci/cx23885/cx23885-core.c
1/*
2 * Driver for the Conexant CX23885 PCIe bridge
3 *
6d897616 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 *
15 * GNU General Public License for more details.
16 */
17
18#include <linux/init.h>
19#include <linux/list.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kmod.h>
23#include <linux/kernel.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <asm/div64.h>
78db8547 28#include <linux/firmware.h>
29
30#include "cx23885.h"
5a23b076 31#include "cimax2.h"
78db8547 32#include "altera-ci.h"
29f8a0a5 33#include "cx23888-ir.h"
f59ad611 34#include "cx23885-ir.h"
e5514f10 35#include "cx23885-av.h"
dbda8f70 36#include "cx23885-input.h"
37
38MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
6d897616 39MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
d19770e5 40MODULE_LICENSE("GPL");
1990d50b 41MODULE_VERSION(CX23885_VERSION);
d19770e5 42
4513fc69 43static unsigned int debug;
44module_param(debug, int, 0644);
45MODULE_PARM_DESC(debug, "enable debug messages");
46
47static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
48module_param_array(card, int, NULL, 0444);
9c8ced51 49MODULE_PARM_DESC(card, "card type");
d19770e5 50
51#define dprintk(level, fmt, arg...)\
52 do { if (debug >= level)\
b5f74050 53 printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
4513fc69 54 } while (0)
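/*
 * Illustrative usage (not part of the original source): with the module
 * loaded as "modprobe cx23885 debug=1", a call such as
 *
 *	dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
 *
 * is printed with the device name prefix, while the dprintk(7, ...) trace
 * lines in the IRQ paths further down stay silent until debug >= 7.
 */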
55
56static unsigned int cx23885_devcount;
57
58#define NO_SYNC_LINE (-1U)
59
60/* FIXME, these allocations will change when
61 * analog arrives. To be reviewed.
62 * CX23887 Assumptions
63 * 1 line = 16 bytes of CDT
64 * cmds size = 80
65 * cdt size = 16 * linesize
66 * iqsize = 64
67 * maxlines = 6
68 *
69 * Address Space:
70 * 0x00000000 0x00008fff FIFO clusters
71 * 0x00010000 0x000104af Channel Management Data Structures
72 * 0x000104b0 0x000104ff Free
73 * 0x00010500 0x000108bf 15 channels * iqsize
74 * 0x000108c0 0x000108ff Free
75 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
76 * 15 channels * (iqsize + (maxlines * linesize))
77 * 0x00010ea0 0x00010xxx Free
78 */
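/* Illustrative check of the numbers above (not in the original comment):
 * with iqsize = 64 and 15 channels, the IQ block occupies
 * 15 * 64 = 960 = 0x3c0 bytes, i.e. 0x00010500 + 0x3c0 - 1 = 0x000108bf,
 * which matches the "15 channels * iqsize" range in the map.
 */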
79
7e994302 80static struct sram_channel cx23885_sram_channels[] = {
d19770e5 81 [SRAM_CH01] = {
82 .name = "VID A",
83 .cmds_start = 0x10000,
84 .ctrl_start = 0x10380,
85 .cdt = 0x104c0,
86 .fifo_start = 0x40,
87 .fifo_size = 0x2800,
88 .ptr1_reg = DMA1_PTR1,
89 .ptr2_reg = DMA1_PTR2,
90 .cnt1_reg = DMA1_CNT1,
91 .cnt2_reg = DMA1_CNT2,
92 },
93 [SRAM_CH02] = {
94 .name = "ch2",
95 .cmds_start = 0x0,
96 .ctrl_start = 0x0,
97 .cdt = 0x0,
98 .fifo_start = 0x0,
99 .fifo_size = 0x0,
100 .ptr1_reg = DMA2_PTR1,
101 .ptr2_reg = DMA2_PTR2,
102 .cnt1_reg = DMA2_CNT1,
103 .cnt2_reg = DMA2_CNT2,
104 },
105 [SRAM_CH03] = {
106 .name = "TS1 B",
107 .cmds_start = 0x100A0,
108 .ctrl_start = 0x10400,
109 .cdt = 0x10580,
110 .fifo_start = 0x5000,
111 .fifo_size = 0x1000,
112 .ptr1_reg = DMA3_PTR1,
113 .ptr2_reg = DMA3_PTR2,
114 .cnt1_reg = DMA3_CNT1,
115 .cnt2_reg = DMA3_CNT2,
116 },
117 [SRAM_CH04] = {
118 .name = "ch4",
119 .cmds_start = 0x0,
120 .ctrl_start = 0x0,
121 .cdt = 0x0,
122 .fifo_start = 0x0,
123 .fifo_size = 0x0,
124 .ptr1_reg = DMA4_PTR1,
125 .ptr2_reg = DMA4_PTR2,
126 .cnt1_reg = DMA4_CNT1,
127 .cnt2_reg = DMA4_CNT2,
128 },
129 [SRAM_CH05] = {
130 .name = "ch5",
131 .cmds_start = 0x0,
132 .ctrl_start = 0x0,
133 .cdt = 0x0,
134 .fifo_start = 0x0,
135 .fifo_size = 0x0,
136 .ptr1_reg = DMA5_PTR1,
137 .ptr2_reg = DMA5_PTR2,
138 .cnt1_reg = DMA5_CNT1,
139 .cnt2_reg = DMA5_CNT2,
140 },
141 [SRAM_CH06] = {
142 .name = "TS2 C",
143 .cmds_start = 0x10140,
144 .ctrl_start = 0x10440,
145 .cdt = 0x105e0,
146 .fifo_start = 0x6000,
147 .fifo_size = 0x1000,
148 .ptr1_reg = DMA5_PTR1,
149 .ptr2_reg = DMA5_PTR2,
150 .cnt1_reg = DMA5_CNT1,
151 .cnt2_reg = DMA5_CNT2,
152 },
153 [SRAM_CH07] = {
154 .name = "TV Audio",
155 .cmds_start = 0x10190,
156 .ctrl_start = 0x10480,
157 .cdt = 0x10a00,
158 .fifo_start = 0x7000,
159 .fifo_size = 0x1000,
160 .ptr1_reg = DMA6_PTR1,
161 .ptr2_reg = DMA6_PTR2,
162 .cnt1_reg = DMA6_CNT1,
163 .cnt2_reg = DMA6_CNT2,
164 },
165 [SRAM_CH08] = {
166 .name = "ch8",
167 .cmds_start = 0x0,
168 .ctrl_start = 0x0,
169 .cdt = 0x0,
170 .fifo_start = 0x0,
171 .fifo_size = 0x0,
172 .ptr1_reg = DMA7_PTR1,
173 .ptr2_reg = DMA7_PTR2,
174 .cnt1_reg = DMA7_CNT1,
175 .cnt2_reg = DMA7_CNT2,
176 },
177 [SRAM_CH09] = {
178 .name = "ch9",
179 .cmds_start = 0x0,
180 .ctrl_start = 0x0,
181 .cdt = 0x0,
182 .fifo_start = 0x0,
183 .fifo_size = 0x0,
184 .ptr1_reg = DMA8_PTR1,
185 .ptr2_reg = DMA8_PTR2,
186 .cnt1_reg = DMA8_CNT1,
187 .cnt2_reg = DMA8_CNT2,
188 },
189};
190
191static struct sram_channel cx23887_sram_channels[] = {
192 [SRAM_CH01] = {
193 .name = "VID A",
194 .cmds_start = 0x10000,
195 .ctrl_start = 0x105b0,
196 .cdt = 0x107b0,
197 .fifo_start = 0x40,
198 .fifo_size = 0x2800,
199 .ptr1_reg = DMA1_PTR1,
200 .ptr2_reg = DMA1_PTR2,
201 .cnt1_reg = DMA1_CNT1,
202 .cnt2_reg = DMA1_CNT2,
203 },
204 [SRAM_CH02] = {
205 .name = "VID A (VBI)",
206 .cmds_start = 0x10050,
207 .ctrl_start = 0x105F0,
208 .cdt = 0x10810,
209 .fifo_start = 0x3000,
210 .fifo_size = 0x1000,
211 .ptr1_reg = DMA2_PTR1,
212 .ptr2_reg = DMA2_PTR2,
213 .cnt1_reg = DMA2_CNT1,
214 .cnt2_reg = DMA2_CNT2,
215 },
216 [SRAM_CH03] = {
217 .name = "TS1 B",
218 .cmds_start = 0x100A0,
219 .ctrl_start = 0x10630,
220 .cdt = 0x10870,
221 .fifo_start = 0x5000,
222 .fifo_size = 0x1000,
223 .ptr1_reg = DMA3_PTR1,
224 .ptr2_reg = DMA3_PTR2,
225 .cnt1_reg = DMA3_CNT1,
226 .cnt2_reg = DMA3_CNT2,
227 },
228 [SRAM_CH04] = {
229 .name = "ch4",
230 .cmds_start = 0x0,
231 .ctrl_start = 0x0,
232 .cdt = 0x0,
233 .fifo_start = 0x0,
234 .fifo_size = 0x0,
235 .ptr1_reg = DMA4_PTR1,
236 .ptr2_reg = DMA4_PTR2,
237 .cnt1_reg = DMA4_CNT1,
238 .cnt2_reg = DMA4_CNT2,
239 },
240 [SRAM_CH05] = {
241 .name = "ch5",
242 .cmds_start = 0x0,
243 .ctrl_start = 0x0,
244 .cdt = 0x0,
245 .fifo_start = 0x0,
246 .fifo_size = 0x0,
247 .ptr1_reg = DMA5_PTR1,
248 .ptr2_reg = DMA5_PTR2,
249 .cnt1_reg = DMA5_CNT1,
250 .cnt2_reg = DMA5_CNT2,
251 },
252 [SRAM_CH06] = {
253 .name = "TS2 C",
254 .cmds_start = 0x10140,
255 .ctrl_start = 0x10670,
256 .cdt = 0x108d0,
257 .fifo_start = 0x6000,
258 .fifo_size = 0x1000,
259 .ptr1_reg = DMA5_PTR1,
260 .ptr2_reg = DMA5_PTR2,
261 .cnt1_reg = DMA5_CNT1,
262 .cnt2_reg = DMA5_CNT2,
263 },
264 [SRAM_CH07] = {
265 .name = "TV Audio",
266 .cmds_start = 0x10190,
267 .ctrl_start = 0x106B0,
268 .cdt = 0x10930,
269 .fifo_start = 0x7000,
270 .fifo_size = 0x1000,
271 .ptr1_reg = DMA6_PTR1,
272 .ptr2_reg = DMA6_PTR2,
273 .cnt1_reg = DMA6_CNT1,
274 .cnt2_reg = DMA6_CNT2,
275 },
276 [SRAM_CH08] = {
277 .name = "ch8",
278 .cmds_start = 0x0,
279 .ctrl_start = 0x0,
280 .cdt = 0x0,
281 .fifo_start = 0x0,
282 .fifo_size = 0x0,
283 .ptr1_reg = DMA7_PTR1,
284 .ptr2_reg = DMA7_PTR2,
285 .cnt1_reg = DMA7_CNT1,
286 .cnt2_reg = DMA7_CNT2,
287 },
288 [SRAM_CH09] = {
289 .name = "ch9",
290 .cmds_start = 0x0,
291 .ctrl_start = 0x0,
292 .cdt = 0x0,
293 .fifo_start = 0x0,
294 .fifo_size = 0x0,
295 .ptr1_reg = DMA8_PTR1,
296 .ptr2_reg = DMA8_PTR2,
297 .cnt1_reg = DMA8_CNT1,
298 .cnt2_reg = DMA8_CNT2,
299 },
300};
301
ada73eee 302static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
303{
304 unsigned long flags;
305 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
306
307 dev->pci_irqmask |= mask;
308
309 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
310}
311
312void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
313{
314 unsigned long flags;
315 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
316
317 dev->pci_irqmask |= mask;
318 cx_set(PCI_INT_MSK, mask);
319
320 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
321}
322
323void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
324{
325 u32 v;
326 unsigned long flags;
327 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
328
329 v = mask & dev->pci_irqmask;
330 if (v)
331 cx_set(PCI_INT_MSK, v);
332
333 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
334}
335
336static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
337{
338 cx23885_irq_enable(dev, 0xffffffff);
339}
340
341void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
342{
343 unsigned long flags;
344 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
345
346 cx_clear(PCI_INT_MSK, mask);
347
348 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
349}
350
351static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
352{
353 cx23885_irq_disable(dev, 0xffffffff);
354}
355
356void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
357{
358 unsigned long flags;
359 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
360
361 dev->pci_irqmask &= ~mask;
362 cx_clear(PCI_INT_MSK, mask);
363
364 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
365}
366
367static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
368{
369 u32 v;
370 unsigned long flags;
371 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
372
373 v = cx_read(PCI_INT_MSK);
374
375 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
376 return v;
377}
378
379static int cx23885_risc_decode(u32 risc)
380{
381 static char *instr[16] = {
382 [RISC_SYNC >> 28] = "sync",
383 [RISC_WRITE >> 28] = "write",
384 [RISC_WRITEC >> 28] = "writec",
385 [RISC_READ >> 28] = "read",
386 [RISC_READC >> 28] = "readc",
387 [RISC_JUMP >> 28] = "jump",
388 [RISC_SKIP >> 28] = "skip",
389 [RISC_WRITERM >> 28] = "writerm",
390 [RISC_WRITECM >> 28] = "writecm",
391 [RISC_WRITECR >> 28] = "writecr",
392 };
393 static int incr[16] = {
394 [RISC_WRITE >> 28] = 3,
395 [RISC_JUMP >> 28] = 3,
396 [RISC_SKIP >> 28] = 1,
397 [RISC_SYNC >> 28] = 1,
398 [RISC_WRITERM >> 28] = 3,
399 [RISC_WRITECM >> 28] = 3,
400 [RISC_WRITECR >> 28] = 4,
401 };
402 static char *bits[] = {
403 "12", "13", "14", "resync",
404 "cnt0", "cnt1", "18", "19",
405 "20", "21", "22", "23",
406 "irq1", "irq2", "eol", "sol",
407 };
408 int i;
409
410 printk("0x%08x [ %s", risc,
411 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
44a6481d 412 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
d19770e5 413 if (risc & (1 << (i + 12)))
44a6481d 414 printk(" %s", bits[i]);
415 printk(" count=%d ]\n", risc & 0xfff);
416 return incr[risc >> 28] ? incr[risc >> 28] : 1;
417}
418
453afdd9 419static void cx23885_wakeup(struct cx23885_tsport *port,
39e75cfe 420 struct cx23885_dmaqueue *q, u32 count)
421{
422 struct cx23885_dev *dev = port->dev;
423 struct cx23885_buffer *buf;
d19770e5 424
9c8ced51 425 if (list_empty(&q->active))
426 return;
427 buf = list_entry(q->active.next,
428 struct cx23885_buffer, queue);
429
430 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
431 buf->vb.v4l2_buf.sequence = q->count++;
432 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
433 count, q->count);
434 list_del(&buf->queue);
435 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
d19770e5 436}
d19770e5 437
7b888014 438int cx23885_sram_channel_setup(struct cx23885_dev *dev,
439 struct sram_channel *ch,
440 unsigned int bpl, u32 risc)
d19770e5 441{
44a6481d 442 unsigned int i, lines;
443 u32 cdt;
444
9c8ced51 445 if (ch->cmds_start == 0) {
22b4e64f 446 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
44a6481d 447 ch->name);
448 cx_write(ch->ptr1_reg, 0);
449 cx_write(ch->ptr2_reg, 0);
450 cx_write(ch->cnt2_reg, 0);
451 cx_write(ch->cnt1_reg, 0);
452 return 0;
453 } else {
22b4e64f 454 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
44a6481d 455 ch->name);
456 }
457
458 bpl = (bpl + 7) & ~7; /* alignment */
459 cdt = ch->cdt;
460 lines = ch->fifo_size / bpl;
461 if (lines > 6)
462 lines = 6;
463 BUG_ON(lines < 2);
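	/* Worked example (illustrative, not in the original source): for a
	 * TS channel with fifo_size = 0x1000 (4096 bytes) and bpl = 188*4 =
	 * 752 (already 8-byte aligned), lines = 4096 / 752 = 5, which falls
	 * inside the 2..6 range enforced above.
	 */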
464
465 cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
466 cx_write(8 + 4, 12);
86ecc027 467 cx_write(8 + 8, 0);
468
469 /* write CDT */
470 for (i = 0; i < lines; i++) {
22b4e64f 471 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
44a6481d 472 ch->fifo_start + bpl*i);
473 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
474 cx_write(cdt + 16*i + 4, 0);
475 cx_write(cdt + 16*i + 8, 0);
476 cx_write(cdt + 16*i + 12, 0);
477 }
478
479 /* write CMDS */
480 if (ch->jumponly)
9c8ced51 481 cx_write(ch->cmds_start + 0, 8);
d19770e5 482 else
9c8ced51 483 cx_write(ch->cmds_start + 0, risc);
484 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
485 cx_write(ch->cmds_start + 8, cdt);
486 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
487 cx_write(ch->cmds_start + 16, ch->ctrl_start);
488 if (ch->jumponly)
9c8ced51 489 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
490 else
491 cx_write(ch->cmds_start + 20, 64 >> 2);
492 for (i = 24; i < 80; i += 4)
493 cx_write(ch->cmds_start + i, 0);
494
495 /* fill registers */
496 cx_write(ch->ptr1_reg, ch->fifo_start);
497 cx_write(ch->ptr2_reg, cdt);
498 cx_write(ch->cnt2_reg, (lines*16) >> 3);
9c8ced51 499 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
d19770e5 500
9c8ced51 501 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
e133be0f 502 dev->bridge,
503 ch->name,
504 bpl,
505 lines);
506
507 return 0;
508}
509
7b888014 510void cx23885_sram_channel_dump(struct cx23885_dev *dev,
39e75cfe 511 struct sram_channel *ch)
512{
513 static char *name[] = {
514 "init risc lo",
515 "init risc hi",
516 "cdt base",
517 "cdt size",
518 "iq base",
519 "iq size",
520 "risc pc lo",
521 "risc pc hi",
522 "iq wr ptr",
523 "iq rd ptr",
524 "cdt current",
525 "pci target lo",
526 "pci target hi",
527 "line / byte",
528 };
529 u32 risc;
44a6481d 530 unsigned int i, j, n;
d19770e5 531
9c8ced51 532 printk(KERN_WARNING "%s: %s - dma channel status dump\n",
533 dev->name, ch->name);
534 for (i = 0; i < ARRAY_SIZE(name); i++)
9c8ced51 535 printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
536 dev->name, name[i],
537 cx_read(ch->cmds_start + 4*i));
538
539 for (i = 0; i < 4; i++) {
44a6481d 540 risc = cx_read(ch->cmds_start + 4 * (i + 14));
9c8ced51 541 printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
542 cx23885_risc_decode(risc);
543 }
544 for (i = 0; i < (64 >> 2); i += n) {
545 risc = cx_read(ch->ctrl_start + 4 * i);
546 /* No consideration for bits 63-32 */
547
9c8ced51 548 printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
44a6481d 549 ch->ctrl_start + 4 * i, i);
550 n = cx23885_risc_decode(risc);
551 for (j = 1; j < n; j++) {
44a6481d 552 risc = cx_read(ch->ctrl_start + 4 * (i + j));
9c8ced51 553 printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
554 dev->name, i+j, risc, j);
555 }
556 }
557
9c8ced51 558 printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
d19770e5 559 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
9c8ced51 560 printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
44a6481d 561 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
9c8ced51 562 printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
d19770e5 563 dev->name, cx_read(ch->ptr1_reg));
9c8ced51 564 printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
d19770e5 565 dev->name, cx_read(ch->ptr2_reg));
9c8ced51 566 printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
d19770e5 567 dev->name, cx_read(ch->cnt1_reg));
9c8ced51 568 printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
569 dev->name, cx_read(ch->cnt2_reg));
570}
571
39e75cfe 572static void cx23885_risc_disasm(struct cx23885_tsport *port,
4d63a25c 573 struct cx23885_riscmem *risc)
574{
575 struct cx23885_dev *dev = port->dev;
44a6481d 576 unsigned int i, j, n;
d19770e5 577
9c8ced51 578 printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
579 dev->name, risc->cpu, (unsigned long)risc->dma);
580 for (i = 0; i < (risc->size >> 2); i += n) {
9c8ced51 581 printk(KERN_INFO "%s: %04d: ", dev->name, i);
86ecc027 582 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
d19770e5 583 for (j = 1; j < n; j++)
9c8ced51 584 printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
44a6481d 585 dev->name, i + j, risc->cpu[i + j], j);
86ecc027 586 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
587 break;
588 }
589}
590
39e75cfe 591static void cx23885_shutdown(struct cx23885_dev *dev)
592{
593 /* disable RISC controller */
594 cx_write(DEV_CNTRL2, 0);
595
596 /* Disable all IR activity */
597 cx_write(IR_CNTRL_REG, 0);
598
599 /* Disable Video A/B activity */
600 cx_write(VID_A_DMA_CTL, 0);
601 cx_write(VID_B_DMA_CTL, 0);
602 cx_write(VID_C_DMA_CTL, 0);
603
604 /* Disable Audio activity */
605 cx_write(AUD_INT_DMA_CTL, 0);
606 cx_write(AUD_EXT_DMA_CTL, 0);
607
608 /* Disable Serial port */
609 cx_write(UART_CTL, 0);
610
611 /* Disable Interrupts */
dbe83a3b 612 cx23885_irq_disable_all(dev);
613 cx_write(VID_A_INT_MSK, 0);
614 cx_write(VID_B_INT_MSK, 0);
615 cx_write(VID_C_INT_MSK, 0);
616 cx_write(AUDIO_INT_INT_MSK, 0);
617 cx_write(AUDIO_EXT_INT_MSK, 0);
618
619}
620
39e75cfe 621static void cx23885_reset(struct cx23885_dev *dev)
d19770e5 622{
22b4e64f 623 dprintk(1, "%s()\n", __func__);
624
625 cx23885_shutdown(dev);
626
627 cx_write(PCI_INT_STAT, 0xffffffff);
628 cx_write(VID_A_INT_STAT, 0xffffffff);
629 cx_write(VID_B_INT_STAT, 0xffffffff);
630 cx_write(VID_C_INT_STAT, 0xffffffff);
631 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
632 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
633 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
ecda5966 634 cx_write(PAD_CTRL, 0x00500300);
635
636 mdelay(100);
637
638 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
639 720*4, 0);
640 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
641 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
642 188*4, 0);
643 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
644 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
645 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
646 188*4, 0);
647 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
648 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
649 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
d19770e5 650
a6a3f140 651 cx23885_gpio_setup(dev);
652}
653
654
655static int cx23885_pci_quirks(struct cx23885_dev *dev)
656{
22b4e64f 657 dprintk(1, "%s()\n", __func__);
d19770e5 658
659 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
660 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
661 * occur on the cx23887 bridge.
662 */
9c8ced51 663 if (dev->bridge == CX23885_BRIDGE_885)
d19770e5 664 cx_clear(RDR_TLCTL0, 1 << 4);
4823e9ee 665
666 return 0;
667}
668
669static int get_resources(struct cx23885_dev *dev)
670{
671 if (request_mem_region(pci_resource_start(dev->pci, 0),
672 pci_resource_len(dev->pci, 0),
44a6481d 673 dev->name))
674 return 0;
675
676 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
9c8ced51 677 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
678
679 return -EBUSY;
680}
681
682static int cx23885_init_tsport(struct cx23885_dev *dev,
683 struct cx23885_tsport *port, int portno)
d19770e5 684{
22b4e64f 685 dprintk(1, "%s(portno=%d)\n", __func__, portno);
686
687 /* Transport bus init dma queue - Common settings */
688 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
689 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
690 port->vld_misc_val = 0x0;
691 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
692
693 spin_lock_init(&port->slock);
694 port->dev = dev;
695 port->nr = portno;
696
697 INIT_LIST_HEAD(&port->mpegq.active);
d782ffa2 698 mutex_init(&port->frontends.lock);
7bdf84fc 699 INIT_LIST_HEAD(&port->frontends.felist);
700 port->frontends.active_fe_id = 0;
701
702 /* This should be hardcoded to allow a single frontend
703 * attachment to this tsport, keeping the -dvb.c
704 * code clean and safe.
705 */
9c8ced51 706 if (!port->num_frontends)
707 port->num_frontends = 1;
708
9c8ced51 709 switch (portno) {
710 case 1:
711 port->reg_gpcnt = VID_B_GPCNT;
712 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
713 port->reg_dma_ctl = VID_B_DMA_CTL;
714 port->reg_lngth = VID_B_LNGTH;
715 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
716 port->reg_gen_ctrl = VID_B_GEN_CTL;
717 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
718 port->reg_sop_status = VID_B_SOP_STATUS;
719 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
720 port->reg_vld_misc = VID_B_VLD_MISC;
721 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
722 port->reg_src_sel = VID_B_SRC_SEL;
723 port->reg_ts_int_msk = VID_B_INT_MSK;
b1b81f1d 724 port->reg_ts_int_stat = VID_B_INT_STAT;
725 port->sram_chno = SRAM_CH03; /* VID_B */
726 port->pci_irqmask = 0x02; /* VID_B bit1 */
727 break;
728 case 2:
729 port->reg_gpcnt = VID_C_GPCNT;
730 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
731 port->reg_dma_ctl = VID_C_DMA_CTL;
732 port->reg_lngth = VID_C_LNGTH;
733 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
734 port->reg_gen_ctrl = VID_C_GEN_CTL;
735 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
736 port->reg_sop_status = VID_C_SOP_STATUS;
737 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
738 port->reg_vld_misc = VID_C_VLD_MISC;
739 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
740 port->reg_src_sel = 0;
741 port->reg_ts_int_msk = VID_C_INT_MSK;
742 port->reg_ts_int_stat = VID_C_INT_STAT;
743 port->sram_chno = SRAM_CH06; /* VID_C */
744 port->pci_irqmask = 0x04; /* VID_C bit2 */
d19770e5 745 break;
746 default:
747 BUG();
748 }
749
750 return 0;
751}
752
753static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
754{
755 switch (cx_read(RDR_CFG2) & 0xff) {
756 case 0x00:
757 /* cx23885 */
758 dev->hwrevision = 0xa0;
759 break;
760 case 0x01:
761 /* CX23885-12Z */
762 dev->hwrevision = 0xa1;
763 break;
764 case 0x02:
25ea66e2 765 /* CX23885-13Z/14Z */
766 dev->hwrevision = 0xb0;
767 break;
768 case 0x03:
769 if (dev->pci->device == 0x8880) {
770 /* CX23888-21Z/22Z */
771 dev->hwrevision = 0xc0;
772 } else {
773 /* CX23885-14Z */
774 dev->hwrevision = 0xa4;
775 }
776 break;
777 case 0x04:
778 if (dev->pci->device == 0x8880) {
779 /* CX23888-31Z */
780 dev->hwrevision = 0xd0;
781 } else {
782 /* CX23885-15Z, CX23888-31Z */
783 dev->hwrevision = 0xa5;
784 }
785 break;
786 case 0x0e:
787 /* CX23887-15Z */
788 dev->hwrevision = 0xc0;
abe1def4 789 break;
790 case 0x0f:
791 /* CX23887-14Z */
792 dev->hwrevision = 0xb1;
793 break;
794 default:
795 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
22b4e64f 796 __func__, dev->hwrevision);
797 }
798 if (dev->hwrevision)
799 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
22b4e64f 800 __func__, dev->hwrevision);
801 else
802 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
22b4e64f 803 __func__, dev->hwrevision);
804}
805
806/* Find the first v4l2_subdev member of the group id in hw */
807struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
808{
809 struct v4l2_subdev *result = NULL;
810 struct v4l2_subdev *sd;
811
812 spin_lock(&dev->v4l2_dev.lock);
813 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
814 if (sd->grp_id == hw) {
815 result = sd;
816 break;
817 }
818 }
819 spin_unlock(&dev->v4l2_dev.lock);
820 return result;
821}
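/* Illustrative caller (a sketch, not in this file): other parts of the
 * driver look up a sub-device they registered with a matching grp_id,
 * e.g. something like
 *
 *	sd = cx23885_find_hw(dev, CX23885_HW_888_IR);
 *
 * where CX23885_HW_888_IR is assumed here as an example group id.
 */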
822
823static int cx23885_dev_setup(struct cx23885_dev *dev)
824{
825 int i;
826
827 spin_lock_init(&dev->pci_irqmask_lock);
828
d19770e5 829 mutex_init(&dev->lock);
8386c27f 830 mutex_init(&dev->gpio_lock);
831
832 atomic_inc(&dev->refcount);
833
834 dev->nr = cx23885_devcount++;
835 sprintf(dev->name, "cx23885[%d]", dev->nr);
836
579f1163 837 /* Configure the internal memory */
9c8ced51 838 if (dev->pci->device == 0x8880) {
25ea66e2 839 /* Could be 887 or 888, assume a default */
579f1163 840 dev->bridge = CX23885_BRIDGE_887;
841 /* Apply a sensible clock frequency for the PCIe bridge */
842 dev->clk_freq = 25000000;
7e994302 843 dev->sram_channels = cx23887_sram_channels;
579f1163 844 } else
9c8ced51 845 if (dev->pci->device == 0x8852) {
579f1163 846 dev->bridge = CX23885_BRIDGE_885;
847 /* Apply a sensible clock frequency for the PCIe bridge */
848 dev->clk_freq = 28000000;
7e994302 849 dev->sram_channels = cx23885_sram_channels;
850 } else
851 BUG();
852
853 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
22b4e64f 854 __func__, dev->bridge);
855
856 /* board config */
857 dev->board = UNSET;
858 if (card[dev->nr] < cx23885_bcount)
859 dev->board = card[dev->nr];
860 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
861 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
862 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
863 dev->board = cx23885_subids[i].card;
864 if (UNSET == dev->board) {
865 dev->board = CX23885_BOARD_UNKNOWN;
866 cx23885_card_list(dev);
867 }
868
869 /* If the user specified a clk freq override, apply it */
870 if (cx23885_boards[dev->board].clk_freq > 0)
871 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
872
873 dev->pci_bus = dev->pci->bus->number;
874 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
dbe83a3b 875 cx23885_irq_add(dev, 0x001f00);
876
877 /* External Master 1 Bus */
878 dev->i2c_bus[0].nr = 0;
879 dev->i2c_bus[0].dev = dev;
880 dev->i2c_bus[0].reg_stat = I2C1_STAT;
881 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
882 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
883 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
884 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
885 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
886
887 /* External Master 2 Bus */
888 dev->i2c_bus[1].nr = 1;
889 dev->i2c_bus[1].dev = dev;
890 dev->i2c_bus[1].reg_stat = I2C2_STAT;
891 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
892 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
893 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
894 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
895 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
896
897 /* Internal Master 3 Bus */
898 dev->i2c_bus[2].nr = 2;
899 dev->i2c_bus[2].dev = dev;
900 dev->i2c_bus[2].reg_stat = I2C3_STAT;
901 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
a2129af5 902 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
903 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
904 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
905 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
906
907 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
908 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
a6a3f140 909 cx23885_init_tsport(dev, &dev->ts1, 1);
579f1163 910
911 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
912 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
a6a3f140 913 cx23885_init_tsport(dev, &dev->ts2, 2);
d19770e5 914
915 if (get_resources(dev) < 0) {
916 printk(KERN_ERR "CORE %s No more PCIe resources for "
917 "subsystem: %04x:%04x\n",
918 dev->name, dev->pci->subsystem_vendor,
919 dev->pci->subsystem_device);
920
921 cx23885_devcount--;
fcf94c89 922 return -ENODEV;
923 }
924
d19770e5 925 /* PCIe stuff */
926 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
927 pci_resource_len(dev->pci, 0));
928
929 dev->bmmio = (u8 __iomem *)dev->lmmio;
930
d19770e5 931 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
932 dev->name, dev->pci->subsystem_vendor,
933 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
934 dev->board, card[dev->nr] == dev->board ?
935 "insmod option" : "autodetected");
d19770e5 936
937 cx23885_pci_quirks(dev);
938
939 /* Assume some sensible defaults */
940 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
941 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
557f48d5 942 dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
943 dev->radio_type = cx23885_boards[dev->board].radio_type;
944 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
945
946 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
947 __func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
7b888014 948 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
22b4e64f 949 __func__, dev->radio_type, dev->radio_addr);
7b888014 950
951 /* The cx23417 encoder has GPIOs that need to be initialised
952 * before DVB, so that demodulators and tuners are out of
953 * reset before DVB uses them.
954 */
955 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
956 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
957 cx23885_mc417_init(dev);
958
959 /* init hardware */
960 cx23885_reset(dev);
961
962 cx23885_i2c_register(&dev->i2c_bus[0]);
963 cx23885_i2c_register(&dev->i2c_bus[1]);
964 cx23885_i2c_register(&dev->i2c_bus[2]);
d19770e5 965 cx23885_card_setup(dev);
622b828a 966 call_all(dev, core, s_power, 0);
967 cx23885_ir_init(dev);
968
969 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
970 if (cx23885_video_register(dev) < 0) {
971 printk(KERN_ERR "%s() Failed to register analog "
22b4e64f 972 "video adapters on VID_A\n", __func__);
973 }
974 }
975
976 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
977 if (cx23885_boards[dev->board].num_fds_portb)
978 dev->ts1.num_frontends =
979 cx23885_boards[dev->board].num_fds_portb;
980 if (cx23885_dvb_register(&dev->ts1) < 0) {
981 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
22b4e64f 982 __func__);
a6a3f140 983 }
984 } else
985 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
986 if (cx23885_417_register(dev) < 0) {
987 printk(KERN_ERR
988 "%s() Failed to register 417 on VID_B\n",
989 __func__);
990 }
991 }
992
7b888014 993 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
994 if (cx23885_boards[dev->board].num_fds_portc)
995 dev->ts2.num_frontends =
996 cx23885_boards[dev->board].num_fds_portc;
a6a3f140 997 if (cx23885_dvb_register(&dev->ts2) < 0) {
998 printk(KERN_ERR
999 "%s() Failed to register dvb on VID_C\n",
1000 __func__);
1001 }
1002 } else
1003 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1004 if (cx23885_417_register(dev) < 0) {
1005 printk(KERN_ERR
1006 "%s() Failed to register 417 on VID_C\n",
22b4e64f 1007 __func__);
a6a3f140 1008 }
1009 }
1010
1011 cx23885_dev_checkrevision(dev);
1012
1013 /* disable MSI for NetUP cards, otherwise CI is not working */
1014 if (cx23885_boards[dev->board].ci_type > 0)
1015 cx_clear(RDR_RDRCTL1, 1 << 8);
1016
1017 switch (dev->board) {
1018 case CX23885_BOARD_TEVII_S470:
1019 case CX23885_BOARD_TEVII_S471:
1020 cx_clear(RDR_RDRCTL1, 1 << 8);
1021 break;
1022 }
1023
d19770e5 1024 return 0;
1025}
1026
39e75cfe 1027static void cx23885_dev_unregister(struct cx23885_dev *dev)
d19770e5 1028{
1029 release_mem_region(pci_resource_start(dev->pci, 0),
1030 pci_resource_len(dev->pci, 0));
1031
1032 if (!atomic_dec_and_test(&dev->refcount))
1033 return;
1034
1035 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1036 cx23885_video_unregister(dev);
1037
b1b81f1d 1038 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1039 cx23885_dvb_unregister(&dev->ts1);
1040
1041 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1042 cx23885_417_unregister(dev);
1043
1044 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1045 cx23885_dvb_unregister(&dev->ts2);
1046
1047 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1048 cx23885_417_unregister(dev);
1049
1050 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1051 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1052 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1053
1054 iounmap(dev->lmmio);
1055}
1056
9c8ced51 1057static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1058 unsigned int offset, u32 sync_line,
1059 unsigned int bpl, unsigned int padding,
453afdd9 1060 unsigned int lines, unsigned int lpi, bool jump)
1061{
1062 struct scatterlist *sg;
9e44d632 1063 unsigned int line, todo, sol;
d19770e5 1064
1065
1066 if (jump) {
1067 *(rp++) = cpu_to_le32(RISC_JUMP);
1068 *(rp++) = cpu_to_le32(0);
1069 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1070 }
1071
1072 /* sync instruction */
1073 if (sync_line != NO_SYNC_LINE)
1074 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1075
1076 /* scan lines */
1077 sg = sglist;
1078 for (line = 0; line < lines; line++) {
1079 while (offset && offset >= sg_dma_len(sg)) {
1080 offset -= sg_dma_len(sg);
7675fe99 1081 sg = sg_next(sg);
d19770e5 1082 }
1083
1084 if (lpi && line > 0 && !(line % lpi))
1085 sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
1086 else
1087 sol = RISC_SOL;
1088
1089 if (bpl <= sg_dma_len(sg)-offset) {
1090 /* fits into current chunk */
9e44d632 1091 *(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
1092 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1093 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1094 offset += bpl;
1095 } else {
1096 /* scanline needs to be split */
1097 todo = bpl;
9e44d632 1098 *(rp++) = cpu_to_le32(RISC_WRITE|sol|
d19770e5 1099 (sg_dma_len(sg)-offset));
1100 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1101 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1102 todo -= (sg_dma_len(sg)-offset);
1103 offset = 0;
7675fe99 1104 sg = sg_next(sg);
d19770e5 1105 while (todo > sg_dma_len(sg)) {
9c8ced51 1106 *(rp++) = cpu_to_le32(RISC_WRITE|
d19770e5 1107 sg_dma_len(sg));
1108 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1109 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
d19770e5 1110 todo -= sg_dma_len(sg);
7675fe99 1111 sg = sg_next(sg);
d19770e5 1112 }
1113 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1114 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1115 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1116 offset += todo;
1117 }
1118 offset += padding;
1119 }
1120
1121 return rp;
1122}
1123
4d63a25c 1124int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1125 struct scatterlist *sglist, unsigned int top_offset,
1126 unsigned int bottom_offset, unsigned int bpl,
1127 unsigned int padding, unsigned int lines)
1128{
1129 u32 instructions, fields;
d8eaa58b 1130 __le32 *rp;
1131
1132 fields = 0;
1133 if (UNSET != top_offset)
1134 fields++;
1135 if (UNSET != bottom_offset)
1136 fields++;
1137
1138 /* estimate risc mem: worst case is one write per page border +
1139 one write per scan line + syncs + jump (all 2 dwords). Padding
1140 can cause next bpl to start close to a page border. First DMA
1141 region may be smaller than PAGE_SIZE */
1142 /* write and jump need an extra dword */
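	/* Illustrative estimate (assumed numbers, not from the original
	 * source): for one field with bpl + padding = 2880, lines = 288 and
	 * PAGE_SIZE = 4096, the formula below gives
	 * 1 + (2880 * 288) / 4096 + 288 = 1 + 202 + 288 = 491 instructions,
	 * plus the 5 added below; each instruction is 12 bytes (3 dwords).
	 */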
1143 instructions = fields * (1 + ((bpl + padding) * lines)
1144 / PAGE_SIZE + lines);
453afdd9 1145 instructions += 5;
1146 risc->size = instructions * 12;
1147 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1148 if (risc->cpu == NULL)
1149 return -ENOMEM;
1150
1151 /* write risc instructions */
1152 rp = risc->cpu;
1153 if (UNSET != top_offset)
1154 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
453afdd9 1155 bpl, padding, lines, 0, true);
1156 if (UNSET != bottom_offset)
1157 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
453afdd9 1158 bpl, padding, lines, 0, UNSET == top_offset);
1159
1160 /* save pointer to jmp instruction address */
1161 risc->jmp = rp;
9c8ced51 1162 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1163 return 0;
1164}
d19770e5 1165
9e44d632 1166int cx23885_risc_databuffer(struct pci_dev *pci,
4d63a25c 1167 struct cx23885_riscmem *risc,
1168 struct scatterlist *sglist,
1169 unsigned int bpl,
9e44d632 1170 unsigned int lines, unsigned int lpi)
1171{
1172 u32 instructions;
d8eaa58b 1173 __le32 *rp;
1174
1175 /* estimate risc mem: worst case is one write per page border +
1176 one write per scan line + syncs + jump (all 2 dwords). Here
1177 there is no padding and no sync. First DMA region may be smaller
1178 than PAGE_SIZE */
1179 /* Jump and write need an extra dword */
1180 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
453afdd9 1181 instructions += 4;
d19770e5 1182
1183 risc->size = instructions * 12;
1184 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1185 if (risc->cpu == NULL)
1186 return -ENOMEM;
1187
1188 /* write risc instructions */
1189 rp = risc->cpu;
9e44d632 1190 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
453afdd9 1191 bpl, 0, lines, lpi, lpi == 0);
1192
1193 /* save pointer to jmp instruction address */
1194 risc->jmp = rp;
9c8ced51 1195 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1196 return 0;
1197}
1198
4d63a25c 1199int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1200 struct scatterlist *sglist, unsigned int top_offset,
1201 unsigned int bottom_offset, unsigned int bpl,
1202 unsigned int padding, unsigned int lines)
1203{
1204 u32 instructions, fields;
1205 __le32 *rp;
1206
1207 fields = 0;
1208 if (UNSET != top_offset)
1209 fields++;
1210 if (UNSET != bottom_offset)
1211 fields++;
1212
1213 /* estimate risc mem: worst case is one write per page border +
1214 one write per scan line + syncs + jump (all 2 dwords). Padding
1215 can cause next bpl to start close to a page border. First DMA
1216 region may be smaller than PAGE_SIZE */
1217 /* write and jump need an extra dword */
1218 instructions = fields * (1 + ((bpl + padding) * lines)
1219 / PAGE_SIZE + lines);
453afdd9 1220 instructions += 5;
1221 risc->size = instructions * 12;
1222 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1223 if (risc->cpu == NULL)
1224 return -ENOMEM;
1225 /* write risc instructions */
1226 rp = risc->cpu;
1227
1228 /* Sync to line 6, so US CC line 21 will appear in line '12'
1229 * in the userland vbi payload */
1230 if (UNSET != top_offset)
420b2176 1231 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
453afdd9 1232 bpl, padding, lines, 0, true);
1233
1234 if (UNSET != bottom_offset)
420b2176 1235 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
453afdd9 1236 bpl, padding, lines, 0, UNSET == top_offset);
1237
1238
1239
1240 /* save pointer to jmp instruction address */
1241 risc->jmp = rp;
1242 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1243 return 0;
1244}
1245
1246
453afdd9 1247void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
d19770e5 1248{
1249 struct cx23885_riscmem *risc = &buf->risc;
1250
d19770e5 1251 BUG_ON(in_interrupt());
4d63a25c 1252 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
1253}
1254
1255static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1256{
1257 struct cx23885_dev *dev = port->dev;
1258
1259 dprintk(1, "%s() Register Dump\n", __func__);
1260 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
7b888014 1261 cx_read(DEV_CNTRL2));
22b4e64f 1262 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
dbe83a3b 1263 cx23885_irq_get_mask(dev));
22b4e64f 1264 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
7b888014 1265 cx_read(AUDIO_INT_INT_MSK));
22b4e64f 1266 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
7b888014 1267 cx_read(AUD_INT_DMA_CTL));
22b4e64f 1268 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
7b888014 1269 cx_read(AUDIO_EXT_INT_MSK));
22b4e64f 1270 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
7b888014 1271 cx_read(AUD_EXT_DMA_CTL));
22b4e64f 1272 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
7b888014 1273 cx_read(PAD_CTRL));
22b4e64f 1274 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
7b888014 1275 cx_read(ALT_PIN_OUT_SEL));
22b4e64f 1276 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
7b888014 1277 cx_read(GPIO2));
22b4e64f 1278 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
7b888014 1279 port->reg_gpcnt, cx_read(port->reg_gpcnt));
22b4e64f 1280 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
7b888014 1281 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
22b4e64f 1282 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
7b888014 1283 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
1284 if (port->reg_src_sel)
1285 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1286 port->reg_src_sel, cx_read(port->reg_src_sel));
22b4e64f 1287 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
7b888014 1288 port->reg_lngth, cx_read(port->reg_lngth));
22b4e64f 1289 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
7b888014 1290 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
22b4e64f 1291 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
7b888014 1292 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
22b4e64f 1293 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
7b888014 1294 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
22b4e64f 1295 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
7b888014 1296 port->reg_sop_status, cx_read(port->reg_sop_status));
22b4e64f 1297 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
7b888014 1298 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
22b4e64f 1299 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
7b888014 1300 port->reg_vld_misc, cx_read(port->reg_vld_misc));
22b4e64f 1301 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
7b888014 1302 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
22b4e64f 1303 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
1304 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
1305}
1306
453afdd9 1307int cx23885_start_dma(struct cx23885_tsport *port,
1308 struct cx23885_dmaqueue *q,
1309 struct cx23885_buffer *buf)
1310{
1311 struct cx23885_dev *dev = port->dev;
a589b665 1312 u32 reg;
d19770e5 1313
22b4e64f 1314 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
453afdd9 1315 dev->width, dev->height, dev->field);
d19770e5 1316
1317 /* Stop the fifo and risc engine for this port */
1318 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1319
1320 /* setup fifo + format */
1321 cx23885_sram_channel_setup(dev,
9c8ced51 1322 &dev->sram_channels[port->sram_chno],
44a6481d 1323 port->ts_packet_size, buf->risc.dma);
1324 if (debug > 5) {
1325 cx23885_sram_channel_dump(dev,
1326 &dev->sram_channels[port->sram_chno]);
d19770e5 1327 cx23885_risc_disasm(port, &buf->risc);
3328e4fb 1328 }
1329
1330 /* write TS length to chip */
453afdd9 1331 cx_write(port->reg_lngth, port->ts_packet_size);
d19770e5 1332
1333 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1334 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1335 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
22b4e64f 1336 __func__,
661c7e44 1337 cx23885_boards[dev->board].portb,
9c8ced51 1338 cx23885_boards[dev->board].portc);
1339 return -EINVAL;
1340 }
1341
1342 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1343 cx23885_av_clk(dev, 0);
1344
1345 udelay(100);
1346
579f1163 1347 /* If the port supports SRC SELECT, configure it */
9c8ced51 1348 if (port->reg_src_sel)
1349 cx_write(port->reg_src_sel, port->src_sel_val);
1350
b1b81f1d 1351 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
d19770e5 1352 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
b1b81f1d 1353 cx_write(port->reg_vld_misc, port->vld_misc_val);
1354 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1355 udelay(100);
1356
9c8ced51 1357 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1358 /* reset counter to zero */
1359 cx_write(port->reg_gpcnt_ctl, 3);
453afdd9 1360 q->count = 0;
d19770e5 1361
1362 /* Set VIDB pins to input */
1363 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1364 reg = cx_read(PAD_CTRL);
1365 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1366 cx_write(PAD_CTRL, reg);
1367 }
1368
1369 /* Set VIDC pins to input */
1370 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1371 reg = cx_read(PAD_CTRL);
1372 reg &= ~0x4; /* Clear TS2_SOP_OE */
1373 cx_write(PAD_CTRL, reg);
1374 }
1375
1376 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1377
1378 reg = cx_read(PAD_CTRL);
1379 reg = reg & ~0x1; /* Clear TS1_OE */
1380
1381 /* FIXME, bit 2 writing here is questionable */
1382 /* set TS1_SOP_OE and TS1_OE_HI */
1383 reg = reg | 0xa;
1384 cx_write(PAD_CTRL, reg);
1385
1386 /* FIXME and these two registers should be documented. */
1387 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1388 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1389 }
1390
9c8ced51 1391 switch (dev->bridge) {
d19770e5 1392 case CX23885_BRIDGE_885:
3bd40659 1393 case CX23885_BRIDGE_887:
25ea66e2 1394 case CX23885_BRIDGE_888:
d19770e5 1395 /* enable irqs */
9c8ced51 1396 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1397 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1398 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1399 cx23885_irq_add(dev, port->pci_irqmask);
1400 cx23885_irq_enable_all(dev);
d19770e5 1401 break;
d19770e5 1402 default:
579f1163 1403 BUG();
1404 }
1405
1406 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1407
1408 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1409 cx23885_av_clk(dev, 1);
1410
1411 if (debug > 4)
1412 cx23885_tsport_reg_dump(port);
1413
1414 return 0;
1415}
1416
1417static int cx23885_stop_dma(struct cx23885_tsport *port)
1418{
1419 struct cx23885_dev *dev = port->dev;
1420 u32 reg;
1421
22b4e64f 1422 dprintk(1, "%s()\n", __func__);
1423
1424 /* Stop interrupts and DMA */
1425 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1426 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1427
52ce27bf 1428 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1429
1430 reg = cx_read(PAD_CTRL);
1431
1432 /* Set TS1_OE */
1433 reg = reg | 0x1;
1434
1435 /* clear TS1_SOP_OE and TS1_OE_HI */
1436 reg = reg & ~0xa;
1437 cx_write(PAD_CTRL, reg);
1438 cx_write(port->reg_src_sel, 0);
1439 cx_write(port->reg_gen_ctrl, 8);
1440
1441 }
1442
1443 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1444 cx23885_av_clk(dev, 0);
1445
1446 return 0;
1447}
1448
1449/* ------------------------------------------------------------------ */
1450
453afdd9 1451int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1452{
1453 struct cx23885_dev *dev = port->dev;
1454 int size = port->ts_packet_size * port->ts_packet_count;
453afdd9 1455 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
d19770e5 1456
22b4e64f 1457 dprintk(1, "%s: %p\n", __func__, buf);
453afdd9 1458 if (vb2_plane_size(&buf->vb, 0) < size)
d19770e5 1459 return -EINVAL;
453afdd9 1460 vb2_set_plane_payload(&buf->vb, 0, size);
d19770e5 1461
1462 cx23885_risc_databuffer(dev->pci, &buf->risc,
1463 sgt->sgl,
1464 port->ts_packet_size, port->ts_packet_count, 0);
1465 return 0;
1466}
1467
1468/*
1469 * The risc program for each buffer works as follows: it starts with a simple
1470 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1471 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1472 * the initial JUMP).
1473 *
1474 * This is the risc program of the first buffer to be queued if the active list
1475 * is empty and it just keeps DMAing this buffer without generating any
1476 * interrupts.
1477 *
1478 * If a new buffer is added then the initial JUMP in the code for that buffer
1479 * will generate an interrupt which signals that the previous buffer has been
1480 * DMAed successfully and that it can be returned to userspace.
1481 *
1482 * It also sets the final jump of the previous buffer to the start of the new
1483 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1484 * atomic u32 write, so there is no race condition.
1485 *
1486 * The end result of all this is that you only get an interrupt when a buffer
1487 * is ready, so the control flow is very easy.
1488 */
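/* Illustrative sketch of the resulting chain (not part of the original
 * comment). After cx23885_buf_queue() below has appended buffer B behind
 * buffer A:
 *
 *	A: [JUMP -> A.dma+12][write A ...][jmp -> B.dma]
 *	B: [JUMP|IRQ1 -> B.dma+12][write B ...][jmp -> B.dma+12]
 *
 * so the IRQ1 at the head of B fires exactly when A has been DMAed.
 */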
1489void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1490{
1491 struct cx23885_buffer *prev;
1492 struct cx23885_dev *dev = port->dev;
1493 struct cx23885_dmaqueue *cx88q = &port->mpegq;
453afdd9 1494 unsigned long flags;
d19770e5 1495
1496 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
1497 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
1498 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
1499 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1500
453afdd9 1501 spin_lock_irqsave(&dev->slock, flags);
d19770e5 1502 if (list_empty(&cx88q->active)) {
453afdd9 1503 list_add_tail(&buf->queue, &cx88q->active);
44a6481d 1504 dprintk(1, "[%p/%d] %s - first active\n",
453afdd9 1505 buf, buf->vb.v4l2_buf.index, __func__);
d19770e5 1506 } else {
453afdd9 1507 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
44a6481d 1508 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
1509 queue);
1510 list_add_tail(&buf->queue, &cx88q->active);
d19770e5 1511 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
9c8ced51 1512 dprintk(1, "[%p/%d] %s - append to active\n",
453afdd9 1513 buf, buf->vb.v4l2_buf.index, __func__);
d19770e5 1514 }
453afdd9 1515 spin_unlock_irqrestore(&dev->slock, flags);
1516}
1517
1518/* ----------------------------------------------------------- */
1519
453afdd9 1520static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1521{
1522 struct cx23885_dev *dev = port->dev;
1523 struct cx23885_dmaqueue *q = &port->mpegq;
1524 struct cx23885_buffer *buf;
1525 unsigned long flags;
1526
44a6481d 1527 spin_lock_irqsave(&port->slock, flags);
d19770e5 1528 while (!list_empty(&q->active)) {
44a6481d 1529 buf = list_entry(q->active.next, struct cx23885_buffer,
1530 queue);
1531 list_del(&buf->queue);
1532 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
44a6481d 1533 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
453afdd9 1534 buf, buf->vb.v4l2_buf.index, reason, (unsigned long)buf->risc.dma);
d19770e5 1535 }
44a6481d 1536 spin_unlock_irqrestore(&port->slock, flags);
1537}
1538
1539void cx23885_cancel_buffers(struct cx23885_tsport *port)
1540{
1541 struct cx23885_dev *dev = port->dev;
d19770e5 1542
9c8ced51 1543 dprintk(1, "%s()\n", __func__);
d19770e5 1544 cx23885_stop_dma(port);
453afdd9 1545 do_cancel_buffers(port, "cancel");
1546}
1547
1548int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1549{
1550 /* FIXME: port1 assumption here. */
1551 struct cx23885_tsport *port = &dev->ts1;
1552 int count = 0;
1553 int handled = 0;
1554
1555 if (status == 0)
1556 return handled;
1557
1558 count = cx_read(port->reg_gpcnt);
1559 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1560 status, cx_read(port->reg_ts_int_msk), count);
1561
1562 if ((status & VID_B_MSK_BAD_PKT) ||
1563 (status & VID_B_MSK_OPC_ERR) ||
1564 (status & VID_B_MSK_VBI_OPC_ERR) ||
1565 (status & VID_B_MSK_SYNC) ||
1566 (status & VID_B_MSK_VBI_SYNC) ||
1567 (status & VID_B_MSK_OF) ||
1568 (status & VID_B_MSK_VBI_OF)) {
1569 printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
1570 "= 0x%x\n", dev->name, status);
1571 if (status & VID_B_MSK_BAD_PKT)
1572 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1573 if (status & VID_B_MSK_OPC_ERR)
1574 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1575 if (status & VID_B_MSK_VBI_OPC_ERR)
1576 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1577 if (status & VID_B_MSK_SYNC)
1578 dprintk(1, " VID_B_MSK_SYNC\n");
1579 if (status & VID_B_MSK_VBI_SYNC)
1580 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1581 if (status & VID_B_MSK_OF)
1582 dprintk(1, " VID_B_MSK_OF\n");
1583 if (status & VID_B_MSK_VBI_OF)
1584 dprintk(1, " VID_B_MSK_VBI_OF\n");
1585
1586 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1587 cx23885_sram_channel_dump(dev,
1588 &dev->sram_channels[port->sram_chno]);
1589 cx23885_417_check_encoder(dev);
1590 } else if (status & VID_B_MSK_RISCI1) {
1591 dprintk(7, " VID_B_MSK_RISCI1\n");
1592 spin_lock(&port->slock);
1593 cx23885_wakeup(port, &port->mpegq, count);
1594 spin_unlock(&port->slock);
1595 }
1596 if (status) {
1597 cx_write(port->reg_ts_int_stat, status);
1598 handled = 1;
1599 }
1600
1601 return handled;
1602}
1603
1604static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1605{
1606 struct cx23885_dev *dev = port->dev;
1607 int handled = 0;
1608 u32 count;
1609
1610 if ((status & VID_BC_MSK_OPC_ERR) ||
1611 (status & VID_BC_MSK_BAD_PKT) ||
1612 (status & VID_BC_MSK_SYNC) ||
1613 (status & VID_BC_MSK_OF)) {
1614
a6a3f140 1615 if (status & VID_BC_MSK_OPC_ERR)
1616 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1617 VID_BC_MSK_OPC_ERR);
1618
a6a3f140 1619 if (status & VID_BC_MSK_BAD_PKT)
1620 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1621 VID_BC_MSK_BAD_PKT);
1622
a6a3f140 1623 if (status & VID_BC_MSK_SYNC)
1624 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1625 VID_BC_MSK_SYNC);
1626
a6a3f140 1627 if (status & VID_BC_MSK_OF)
1628 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1629 VID_BC_MSK_OF);
1630
1631 printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
1632
1633 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1634 cx23885_sram_channel_dump(dev,
1635 &dev->sram_channels[port->sram_chno]);
1636
1637 } else if (status & VID_BC_MSK_RISCI1) {
1638
1639 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1640
1641 spin_lock(&port->slock);
1642 count = cx_read(port->reg_gpcnt);
1643 cx23885_wakeup(port, &port->mpegq, count);
1644 spin_unlock(&port->slock);
1645
1646 }
1647 if (status) {
1648 cx_write(port->reg_ts_int_stat, status);
1649 handled = 1;
1650 }
1651
1652 return handled;
1653}
1654
03121f05 1655static irqreturn_t cx23885_irq(int irq, void *dev_id)
1656{
1657 struct cx23885_dev *dev = dev_id;
1658 struct cx23885_tsport *ts1 = &dev->ts1;
1659 struct cx23885_tsport *ts2 = &dev->ts2;
d19770e5 1660 u32 pci_status, pci_mask;
7b888014 1661 u32 vida_status, vida_mask;
9e44d632 1662 u32 audint_status, audint_mask;
6f074abb 1663 u32 ts1_status, ts1_mask;
d19770e5 1664 u32 ts2_status, ts2_mask;
7b888014 1665 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
9e44d632 1666 int audint_count = 0;
98d109f9 1667 bool subdev_handled;
1668
1669 pci_status = cx_read(PCI_INT_STAT);
dbe83a3b 1670 pci_mask = cx23885_irq_get_mask(dev);
1671 vida_status = cx_read(VID_A_INT_STAT);
1672 vida_mask = cx_read(VID_A_INT_MSK);
1673 audint_status = cx_read(AUDIO_INT_INT_STAT);
1674 audint_mask = cx_read(AUDIO_INT_INT_MSK);
1675 ts1_status = cx_read(VID_B_INT_STAT);
1676 ts1_mask = cx_read(VID_B_INT_MSK);
d19770e5
ST
1677 ts2_status = cx_read(VID_C_INT_STAT);
1678 ts2_mask = cx_read(VID_C_INT_MSK);
1679
9c8ced51 1680 if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
d19770e5
ST
1681 goto out;
1682
7b888014 1683 vida_count = cx_read(VID_A_GPCNT);
9e44d632 1684 audint_count = cx_read(AUD_INT_A_GPCNT);
a6a3f140
ST
1685 ts1_count = cx_read(ts1->reg_gpcnt);
1686 ts2_count = cx_read(ts2->reg_gpcnt);
7b888014
ST
1687 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1688 pci_status, pci_mask);
1689 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1690 vida_status, vida_mask, vida_count);
9e44d632
MM
1691 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1692 audint_status, audint_mask, audint_count);
7b888014
ST
1693 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1694 ts1_status, ts1_mask, ts1_count);
1695 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1696 ts2_status, ts2_mask, ts2_count);
d19770e5 1697
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
				PCI_MSK_IR);
	}

	if (cx23885_boards[dev->board].ci_type == 1 &&
	    (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
	    (pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}

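/*
 * v4l2_device notify callback: hands IR RX/TX notifications from the
 * integrated IR subdevice over to the cx23885-ir notify helpers. As the
 * case comments below note, this may run in IRQ context, so no blocking
 * work is done here directly.
 */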
static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
				    unsigned int notification, void *arg)
{
	struct cx23885_dev *dev;

	if (sd == NULL)
		return;

	dev = to_cx23885(sd->v4l2_dev);

	switch (notification) {
	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
		if (sd == dev->sd_ir)
			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
		if (sd == dev->sd_ir)
			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	}
}

static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}

static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}

static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}

/* The mask represents 32 different GPIOs. The GPIOs are split across
 * multiple registers depending on the board configuration (and whether
 * the 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared, so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x7)
		cx_set(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Setting GPIO on encoder ports\n",
				dev->name);
		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}

void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		cx_clear(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Clearing GPIO on encoder ports\n",
				dev->name);
		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}

u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Reading GPIO on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);

	return 0;
}

void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	if ((mask & 0x00000007) && asoutput)
		cx_set(GP0_IO, (mask & 0x7) << 16);
	else if ((mask & 0x00000007) && !asoutput)
		cx_clear(GP0_IO, (mask & 0x7) << 16);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Enabling GPIO on encoder ports\n",
				dev->name);
	}

	/* MC417_OEN is active low for output, write 1 for an input */
	if ((mask & 0x0007fff8) && asoutput)
		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);

	else if ((mask & 0x0007fff8) && !asoutput)
		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);

	/* TODO: 23-19 */
}
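/*
 * Usage sketch (illustrative only; nothing in this driver calls it exactly
 * this way): board setup code could pulse a reset line wired to bridge
 * GPIO 0 and let the helpers above route the mask bit to the right
 * register. The choice of GPIO 0 and the 20ms delay are assumptions made
 * purely for the example.
 *
 *	cx23885_gpio_enable(dev, 0x00000001, 1);  - drive GPIO 0 as an output
 *	cx23885_gpio_clear(dev, 0x00000001);      - pull the line low (assert reset)
 *	mdelay(20);
 *	cx23885_gpio_set(dev, 0x00000001);        - release the line
 */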
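/*
 * PCI probe entry point: allocate the device state, register the
 * v4l2_device and control handler, enable and set up the PCI device, then
 * hook up the shared interrupt handler before enabling any interrupt
 * sources (NetUP CI GPIOs, IR) that could fire immediately.
 */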
static int cx23885_initdev(struct pci_dev *pci_dev,
			   const struct pci_device_id *pci_id)
{
	struct cx23885_dev *dev;
	struct v4l2_ctrl_handler *hdl;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (NULL == dev)
		return -ENOMEM;

	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
	if (err < 0)
		goto fail_free;

	hdl = &dev->ctrl_handler;
	v4l2_ctrl_handler_init(hdl, 6);
	if (hdl->error) {
		err = hdl->error;
		goto fail_ctrl;
	}
	dev->v4l2_dev.ctrl_handler = hdl;

	/* Prepare to handle notifications from subdevices */
	cx23885_v4l2_dev_notify_init(dev);

	/* pci init */
	dev->pci = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto fail_ctrl;
	}

	if (cx23885_dev_setup(dev) < 0) {
		err = -EINVAL;
		goto fail_ctrl;
	}

	/* print pci info */
	dev->pci_rev = pci_dev->revision;
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
	       "latency: %d, mmio: 0x%llx\n", dev->name,
	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
	       dev->pci_lat,
	       (unsigned long long)pci_resource_start(pci_dev, 0));

	pci_set_master(pci_dev);
	if (!pci_dma_supported(pci_dev, 0xffffffff)) {
		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		err = -EIO;
		goto fail_context;
	}

	dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
	if (IS_ERR(dev->alloc_ctx)) {
		err = PTR_ERR(dev->alloc_ctx);
		goto fail_context;
	}
	err = request_irq(pci_dev->irq, cx23885_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		printk(KERN_ERR "%s: can't get IRQ %d\n",
		       dev->name, pci_dev->irq);
		goto fail_irq;
	}

	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
		break;
	}

	/*
	 * The CX2388[58] IR controller can start firing interrupts when
	 * enabled, so these have to take place after the cx23885_irq() handler
	 * is hooked up by the call to request_irq() above.
	 */
	cx23885_ir_pci_int_enable(dev);
	cx23885_input_init(dev);

	return 0;

fail_irq:
	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
fail_context:
	cx23885_dev_unregister(dev);
fail_ctrl:
	v4l2_ctrl_handler_free(hdl);
	v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
	kfree(dev);
	return err;
}

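/*
 * PCI remove entry point: shuts the input/IR paths and the hardware down,
 * then releases resources roughly in the reverse order of
 * cx23885_initdev().
 */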
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}

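/*
 * PCI IDs handled by this driver: the CX23885 and CX23887 bridge devices
 * (Conexant, vendor 0x14f1), matched for any subsystem vendor/device.
 */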
static struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);

static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};

static int __init cx23885_init(void)
{
	printk(KERN_INFO "cx23885 driver version %s loaded\n",
	       CX23885_VERSION);
	return pci_register_driver(&cx23885_pci_driver);
}

static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}

module_init(cx23885_init);
module_exit(cx23885_fini);