1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Driver for the Conexant CX23885 PCIe bridge
4 *
5 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
6 */
7
8 #include "cx23885.h"
9
10 #include <linux/init.h>
11 #include <linux/list.h>
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/kmod.h>
15 #include <linux/kernel.h>
16 #include <linux/pci.h>
17 #include <linux/slab.h>
18 #include <linux/interrupt.h>
19 #include <linux/delay.h>
20 #include <asm/div64.h>
21 #include <linux/firmware.h>
22
23 #include "cimax2.h"
24 #include "altera-ci.h"
25 #include "cx23888-ir.h"
26 #include "cx23885-ir.h"
27 #include "cx23885-av.h"
28 #include "cx23885-input.h"
29
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

/*
 * Some platforms have been found to require periodic resetting of the DMA
 * engine. Ryzen and XEON platforms are known to be affected. The symptom
 * encountered is "mpeg risc op code error". Only Ryzen platforms employ
 * this workaround if the option equals 1. The workaround can be explicitly
 * disabled for all platforms by setting to 0, the workaround can be forced
 * on for any platform by setting to 2.
 */
static unsigned int dma_reset_workaround = 1;
module_param(dma_reset_workaround, int, 0644);
MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");

/* Debug verbosity; higher values enable more dprintk() output */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/* Per-device board override; UNSET means autodetect from PCI subsystem IDs */
static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

/* Print only when the module-level 'debug' parameter is at least @level */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
	} while (0)

/* Number of bridges probed so far; used to name and index devices */
static unsigned int cx23885_devcount;

/* Sentinel meaning "RISC program contains no sync instruction" */
#define NO_SYNC_LINE (-1U)
64
65 /* FIXME, these allocations will change when
* analog arrives. To be reviewed.
67 * CX23887 Assumptions
68 * 1 line = 16 bytes of CDT
69 * cmds size = 80
70 * cdt size = 16 * linesize
71 * iqsize = 64
72 * maxlines = 6
73 *
74 * Address Space:
75 * 0x00000000 0x00008fff FIFO clusters
76 * 0x00010000 0x000104af Channel Management Data Structures
77 * 0x000104b0 0x000104ff Free
78 * 0x00010500 0x000108bf 15 channels * iqsize
79 * 0x000108c0 0x000108ff Free
80 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
81 * 15 channels * (iqsize + (maxlines * linesize))
82 * 0x00010ea0 0x00010xxx Free
83 */
84
/*
 * SRAM channel map for the cx23885 bridge.  Channels whose cmds_start
 * is 0 are unused on this silicon; cx23885_sram_channel_setup() erases
 * them instead of configuring them.
 */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		/* unused on the cx23885 (no VBI channel) */
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		/* unused */
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		/* unused */
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		/* NOTE(review): shares the DMA5_* registers with SRAM_CH05;
		 * this matches the historical layout — confirm before changing */
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		/* unused */
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		/* unused */
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
195
/*
 * SRAM channel map for the cx23887/cx23888 bridge.  Unlike the 885
 * table this one has a live VBI channel (SRAM_CH02) and different
 * ctrl/cdt offsets; channels with cmds_start == 0 remain unused.
 */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		/* unused */
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		/* unused */
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		/* NOTE(review): shares the DMA5_* registers with SRAM_CH05;
		 * this matches the historical layout — confirm before changing */
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		/* unused */
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		/* unused */
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
306
/*
 * Record @mask in the cached PCI interrupt mask.  Only the software
 * copy is updated here; the hardware PCI_INT_MSK register is untouched.
 */
static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irq_flags);
	dev->pci_irqmask |= mask;
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irq_flags);
}
316
/*
 * Record @mask in the cached PCI interrupt mask and unmask the same
 * bits in the hardware PCI_INT_MSK register, atomically with respect
 * to the other irq-mask helpers.
 */
void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irq_flags);
	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irq_flags);
}
327
/*
 * Unmask in hardware only those bits of @mask that were previously
 * registered via cx23885_irq_add*(); unregistered bits are ignored.
 */
void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irq_flags;
	u32 enable_bits;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irq_flags);
	enable_bits = mask & dev->pci_irqmask;
	if (enable_bits)
		cx_set(PCI_INT_MSK, enable_bits);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irq_flags);
}
340
/* Unmask every interrupt source previously registered in pci_irqmask */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
345
/*
 * Mask @mask in the hardware PCI_INT_MSK register.  The cached
 * pci_irqmask is deliberately left alone so the bits can be
 * re-enabled later via cx23885_irq_enable().
 */
void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irq_flags);
	cx_clear(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irq_flags);
}
355
/* Mask every interrupt source in hardware (cached mask is preserved) */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
360
/*
 * Permanently drop @mask: clear it from the cached pci_irqmask and
 * mask it in hardware, so cx23885_irq_enable() can no longer turn
 * those bits back on.
 */
void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irq_flags);
	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irq_flags);
}
371
cx23885_irq_get_mask(struct cx23885_dev * dev)372 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
373 {
374 u32 v;
375 unsigned long flags;
376 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
377
378 v = cx_read(PCI_INT_MSK);
379
380 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
381 return v;
382 }
383
/*
 * Decode and print one RISC instruction word for debugging.
 *
 * @risc: raw 32-bit instruction word from the instruction queue
 *
 * Prints the mnemonic (bits 31..28), any flag bits set in bits 12..27
 * and the 12-bit transfer count.  Returns the instruction length in
 * 32-bit words (opcode plus arguments) so the caller can advance to
 * the next instruction; unknown opcodes report a length of 1.
 */
static int cx23885_risc_decode(u32 risc)
{
	/* lookup tables are read-only: const-qualify them */
	static const char * const instr[16] = {
		[RISC_SYNC    >> 28] = "sync",
		[RISC_WRITE   >> 28] = "write",
		[RISC_WRITEC  >> 28] = "writec",
		[RISC_READ    >> 28] = "read",
		[RISC_READC   >> 28] = "readc",
		[RISC_JUMP    >> 28] = "jump",
		[RISC_SKIP    >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	/* instruction length in words, per opcode; 0 entries mean "unknown" */
	static const int incr[16] = {
		[RISC_WRITE   >> 28] = 3,
		[RISC_JUMP    >> 28] = 3,
		[RISC_SKIP    >> 28] = 1,
		[RISC_SYNC    >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	/* names of flag bits 12..27, lowest bit first */
	static const char * const bits[] = {
		"12",   "13",   "14",   "resync",
		"cnt0", "cnt1", "18",   "19",
		"20",   "21",   "22",   "23",
		"irq1", "irq2", "eol",  "sol",
	};
	int i;

	printk(KERN_DEBUG "0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			pr_cont(" %s", bits[i]);
	pr_cont(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
423
/*
 * Complete finished buffers on a transport port's active queue.
 *
 * @port:  transport port being serviced
 * @q:     dma queue holding in-flight buffers
 * @count: current value of the hardware general-purpose counter
 *
 * Buffers are handed back to videobuf2 in queue order until the
 * driver's 16-bit sequence counter catches up with @count, capped at
 * five buffers per call so the loop cannot run unbounded.
 */
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;
	int count_delta;
	int max_buf_done = 5; /* service maximum five buffers */

	do {
		if (list_empty(&q->active))
			return;
		buf = list_entry(q->active.next,
				 struct cx23885_buffer, queue);

		buf->vb.vb2_buf.timestamp = ktime_get_ns();
		buf->vb.sequence = q->count++;
		/* mismatch between hw and sw counters is worth a louder
		 * debug message than the in-sync case */
		if (count != (q->count % 65536)) {
			dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		} else {
			dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		}
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		max_buf_done--;
		/* count register is 16 bits so apply modulo appropriately */
		count_delta = ((int)count - (int)(q->count % 65536));
	} while ((count_delta > 0) && (max_buf_done > 0));
}
453
/*
 * Program (or erase) one SRAM dma channel.
 *
 * @dev:  bridge device
 * @ch:   SRAM channel descriptor to configure
 * @bpl:  bytes per line; rounded up to the next 8-byte multiple
 * @risc: value written as the channel's first CMDS word, unless
 *        ch->jumponly is set
 *
 * Channels with cmds_start == 0 are unused on this part and are simply
 * zeroed out.  For live channels the cluster descriptor table (CDT),
 * the CMDS block and the channel's pointer/count registers are all
 * written.  Always returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		/* unused channel: park all pointer/count registers at zero */
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	/* fewer than 2 lines cannot form a valid cluster chain */
	BUG_ON(lines < 2);

	/* park a jump-to-self with counter reset at SRAM address 8 */
	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	/* zero the remainder of the 80-byte CMDS block */
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
525
cx23885_sram_channel_dump(struct cx23885_dev * dev,struct sram_channel * ch)526 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
527 struct sram_channel *ch)
528 {
529 static char *name[] = {
530 "init risc lo",
531 "init risc hi",
532 "cdt base",
533 "cdt size",
534 "iq base",
535 "iq size",
536 "risc pc lo",
537 "risc pc hi",
538 "iq wr ptr",
539 "iq rd ptr",
540 "cdt current",
541 "pci target lo",
542 "pci target hi",
543 "line / byte",
544 };
545 u32 risc;
546 unsigned int i, j, n;
547
548 pr_warn("%s: %s - dma channel status dump\n",
549 dev->name, ch->name);
550 for (i = 0; i < ARRAY_SIZE(name); i++)
551 pr_warn("%s: cmds: %-15s: 0x%08x\n",
552 dev->name, name[i],
553 cx_read(ch->cmds_start + 4*i));
554
555 for (i = 0; i < 4; i++) {
556 risc = cx_read(ch->cmds_start + 4 * (i + 14));
557 pr_warn("%s: risc%d: ", dev->name, i);
558 cx23885_risc_decode(risc);
559 }
560 for (i = 0; i < (64 >> 2); i += n) {
561 risc = cx_read(ch->ctrl_start + 4 * i);
562 /* No consideration for bits 63-32 */
563
564 pr_warn("%s: (0x%08x) iq %x: ", dev->name,
565 ch->ctrl_start + 4 * i, i);
566 n = cx23885_risc_decode(risc);
567 for (j = 1; j < n; j++) {
568 risc = cx_read(ch->ctrl_start + 4 * (i + j));
569 pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
570 dev->name, i+j, risc, j);
571 }
572 }
573
574 pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
575 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
576 pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
577 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
578 pr_warn("%s: ptr1_reg: 0x%08x\n",
579 dev->name, cx_read(ch->ptr1_reg));
580 pr_warn("%s: ptr2_reg: 0x%08x\n",
581 dev->name, cx_read(ch->ptr2_reg));
582 pr_warn("%s: cnt1_reg: 0x%08x\n",
583 dev->name, cx_read(ch->cnt1_reg));
584 pr_warn("%s: cnt2_reg: 0x%08x\n",
585 dev->name, cx_read(ch->cnt2_reg));
586 }
587
cx23885_risc_disasm(struct cx23885_tsport * port,struct cx23885_riscmem * risc)588 static void cx23885_risc_disasm(struct cx23885_tsport *port,
589 struct cx23885_riscmem *risc)
590 {
591 struct cx23885_dev *dev = port->dev;
592 unsigned int i, j, n;
593
594 pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
595 dev->name, risc->cpu, (unsigned long)risc->dma);
596 for (i = 0; i < (risc->size >> 2); i += n) {
597 pr_info("%s: %04d: ", dev->name, i);
598 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
599 for (j = 1; j < n; j++)
600 pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
601 dev->name, i + j, risc->cpu[i + j], j);
602 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
603 break;
604 }
605 }
606
/*
 * Workaround for stuck DMA on affected platforms (see the
 * dma_reset_workaround module parameter).  When both TC_REQ and
 * TC_REQ_SET still flag an in-flight transfer, write the values back
 * to clear them and read the video/VBI DMA status registers to flush
 * the state.  No-op unless dev->need_dma_reset is set.
 */
static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
{
	uint32_t reg1_val, reg2_val;

	if (!dev->need_dma_reset)
		return;

	reg1_val = cx_read(TC_REQ); /* read-only */
	reg2_val = cx_read(TC_REQ_SET);

	if (reg1_val && reg2_val) {
		/* writing the values back clears the pending request bits */
		cx_write(TC_REQ, reg1_val);
		cx_write(TC_REQ_SET, reg2_val);
		/* reads flush the dma status — values intentionally discarded */
		cx_read(VID_B_DMA);
		cx_read(VBI_B_DMA);
		cx_read(VID_C_DMA);
		cx_read(VBI_C_DMA);

		dev_info(&dev->pci->dev,
			"dma in progress detected 0x%08x 0x%08x, clearing\n",
			reg1_val, reg2_val);
	}
}
630
/*
 * Quiesce the bridge: stop the RISC controller, all DMA engines, IR,
 * the UART, and mask every interrupt source.  Called from
 * cx23885_reset() before reprogramming and at teardown.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
660
/*
 * Full hardware reset: quiesce the bridge, acknowledge all latched
 * interrupt status, then reprogram every SRAM dma channel, the GPIOs
 * and the interrupt mask from scratch.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* ack any stale interrupt status */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	msleep(100);

	/* reprogram every channel; unused ones (bpl 128) are just erased */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
}
700
701
/*
 * Apply bridge-specific PCI errata workarounds.  Always returns 0.
 */
static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	return 0;
}
717
get_resources(struct cx23885_dev * dev)718 static int get_resources(struct cx23885_dev *dev)
719 {
720 if (request_mem_region(pci_resource_start(dev->pci, 0),
721 pci_resource_len(dev->pci, 0),
722 dev->name))
723 return 0;
724
725 pr_err("%s: can't get MMIO memory @ 0x%llx\n",
726 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
727
728 return -EBUSY;
729 }
730
/*
 * Initialize one MPEG transport port (VID_B or VID_C).
 *
 * @dev:    bridge device
 * @port:   transport port structure to initialize
 * @portno: 1 for VID_B, 2 for VID_C (any other value is a BUG)
 *
 * Sets common dma/interrupt defaults, initializes the buffer queue and
 * frontend list, then binds the port to the register set and SRAM
 * channel of the selected video path.  Always returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
	struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	/* bind the port to its register set and SRAM channel */
	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0;
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
801
cx23885_dev_checkrevision(struct cx23885_dev * dev)802 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
803 {
804 switch (cx_read(RDR_CFG2) & 0xff) {
805 case 0x00:
806 /* cx23885 */
807 dev->hwrevision = 0xa0;
808 break;
809 case 0x01:
810 /* CX23885-12Z */
811 dev->hwrevision = 0xa1;
812 break;
813 case 0x02:
814 /* CX23885-13Z/14Z */
815 dev->hwrevision = 0xb0;
816 break;
817 case 0x03:
818 if (dev->pci->device == 0x8880) {
819 /* CX23888-21Z/22Z */
820 dev->hwrevision = 0xc0;
821 } else {
822 /* CX23885-14Z */
823 dev->hwrevision = 0xa4;
824 }
825 break;
826 case 0x04:
827 if (dev->pci->device == 0x8880) {
828 /* CX23888-31Z */
829 dev->hwrevision = 0xd0;
830 } else {
831 /* CX23885-15Z, CX23888-31Z */
832 dev->hwrevision = 0xa5;
833 }
834 break;
835 case 0x0e:
836 /* CX23887-15Z */
837 dev->hwrevision = 0xc0;
838 break;
839 case 0x0f:
840 /* CX23887-14Z */
841 dev->hwrevision = 0xb1;
842 break;
843 default:
844 pr_err("%s() New hardware revision found 0x%x\n",
845 __func__, dev->hwrevision);
846 }
847 if (dev->hwrevision)
848 pr_info("%s() Hardware revision = 0x%02x\n",
849 __func__, dev->hwrevision);
850 else
851 pr_err("%s() Hardware revision unknown 0x%x\n",
852 __func__, dev->hwrevision);
853 }
854
855 /* Find the first v4l2_subdev member of the group id in hw */
/*
 * Return the first registered v4l2 sub-device whose group id equals
 * @hw, or NULL when none matches.  The v4l2_dev lock is held while
 * walking the sub-device list.
 */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *sd;
	struct v4l2_subdev *match = NULL;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id != hw)
			continue;
		match = sd;
		break;
	}
	spin_unlock(&dev->v4l2_dev.lock);

	return match;
}
871
/*
 * One-time device bring-up, called at probe time.
 *
 * Detects the bridge type from the PCI device id, identifies the board
 * (module parameter override, then PCI subsystem id table), maps BAR0,
 * resets the hardware and registers the i2c buses, IR, analog video,
 * DVB and encoder sub-drivers as the board profile dictates.
 *
 * Returns 0 on success or -ENODEV when the MMIO region cannot be
 * claimed.
 */
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume an 888 default */
		dev->bridge = CX23885_BRIDGE_888;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 50000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config: explicit module parameter wins, then subsystem ids */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	if (dev->pci->device == 0x8852) {
		/* no DIF on cx23885, so no analog tuner support possible */
		if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
		else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
	}

	/* If the user specific a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
		dev->pci->subsystem_device == 0x7137) {
		/* Hauppauge ImpactVCBe device ID 0x7137 is populated
		 * with an 888, and a 25Mhz crystal, instead of the
		 * usual third overtone 50Mhz. The default clock rate must
		 * be overridden so the cx25840 is properly configured
		 */
		dev->clk_freq = 25000000;
	}

	dev->pci_bus  = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	/* init transport ports before BAR0 is touched */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
		dev->name, dev->pci->subsystem_vendor,
		dev->pci->subsystem_device, cx23885_boards[dev->board].name,
		dev->board, card[dev->nr] == dev->board ?
		"insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, tuner, standby);
	cx23885_ir_init(dev);

	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
		/*
		 * GPIOs 9/8 are input detection bits for the breakout video
		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
		 * this gpios are pulled high. Make sure these GPIOs are marked
		 * as inputs.
		 */
		cx23885_gpio_enable(dev, 0x300, 0);
	}

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			pr_err("%s() Failed to register analog video adapters on VID_A\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			pr_err("%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			pr_err("%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}
1100
/*
 * Drop a reference to @dev and, on the final reference, tear down every
 * registered sub-driver in roughly the reverse order of registration.
 * The BAR0 memory region is released unconditionally on every call.
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	/* Only the last reference performs the teardown below. */
	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* I2C buses come down after the sub-devices that sit on them. */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1130
/*
 * Emit a RISC DMA program transferring @lines scan lines of @bpl bytes
 * each from the transport into the scatter-gather list @sglist, starting
 * @offset bytes into the buffer.
 *
 * @rp:        write cursor into the RISC instruction memory
 * @sync_line: line to RESYNC on, or NO_SYNC_LINE to emit no sync
 * @padding:   destination bytes skipped after each line
 * @lpi:       lines per interrupt; every @lpi-th line raises IRQ1 and
 *             bumps the line counter (0 disables this)
 * @jump:      when true, prefix a placeholder JUMP instruction whose
 *             target is patched later to chain buffers together
 *
 * Returns the advanced write cursor so the caller can append further
 * instructions (e.g. the trailing jump).
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		/* Placeholder jump; the target dwords stay zero until the
		 * buffer is queued and the address is patched in. */
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* Advance past whole SG chunks that precede the offset. */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		/* Every lpi-th line also raises IRQ1 and counts. */
		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split: first a partial write
			 * up to the end of the current chunk ... */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			/* ... then full-chunk writes while data remains ... */
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			/* ... and a final write carrying the EOL marker. */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1197
cx23885_risc_buffer(struct pci_dev * pci,struct cx23885_riscmem * risc,struct scatterlist * sglist,unsigned int top_offset,unsigned int bottom_offset,unsigned int bpl,unsigned int padding,unsigned int lines)1198 int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1199 struct scatterlist *sglist, unsigned int top_offset,
1200 unsigned int bottom_offset, unsigned int bpl,
1201 unsigned int padding, unsigned int lines)
1202 {
1203 u32 instructions, fields;
1204 __le32 *rp;
1205
1206 fields = 0;
1207 if (UNSET != top_offset)
1208 fields++;
1209 if (UNSET != bottom_offset)
1210 fields++;
1211
1212 /* estimate risc mem: worst case is one write per page border +
1213 one write per scan line + syncs + jump (all 2 dwords). Padding
1214 can cause next bpl to start close to a page border. First DMA
1215 region may be smaller than PAGE_SIZE */
1216 /* write and jump need and extra dword */
1217 instructions = fields * (1 + ((bpl + padding) * lines)
1218 / PAGE_SIZE + lines);
1219 instructions += 5;
1220 risc->size = instructions * 12;
1221 risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
1222 GFP_KERNEL);
1223 if (risc->cpu == NULL)
1224 return -ENOMEM;
1225
1226 /* write risc instructions */
1227 rp = risc->cpu;
1228 if (UNSET != top_offset)
1229 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1230 bpl, padding, lines, 0, true);
1231 if (UNSET != bottom_offset)
1232 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1233 bpl, padding, lines, 0, UNSET == top_offset);
1234
1235 /* save pointer to jmp instruction address */
1236 risc->jmp = rp;
1237 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1238 return 0;
1239 }
1240
cx23885_risc_databuffer(struct pci_dev * pci,struct cx23885_riscmem * risc,struct scatterlist * sglist,unsigned int bpl,unsigned int lines,unsigned int lpi)1241 int cx23885_risc_databuffer(struct pci_dev *pci,
1242 struct cx23885_riscmem *risc,
1243 struct scatterlist *sglist,
1244 unsigned int bpl,
1245 unsigned int lines, unsigned int lpi)
1246 {
1247 u32 instructions;
1248 __le32 *rp;
1249
1250 /* estimate risc mem: worst case is one write per page border +
1251 one write per scan line + syncs + jump (all 2 dwords). Here
1252 there is no padding and no sync. First DMA region may be smaller
1253 than PAGE_SIZE */
1254 /* Jump and write need an extra dword */
1255 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
1256 instructions += 4;
1257
1258 risc->size = instructions * 12;
1259 risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
1260 GFP_KERNEL);
1261 if (risc->cpu == NULL)
1262 return -ENOMEM;
1263
1264 /* write risc instructions */
1265 rp = risc->cpu;
1266 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
1267 bpl, 0, lines, lpi, lpi == 0);
1268
1269 /* save pointer to jmp instruction address */
1270 risc->jmp = rp;
1271 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1272 return 0;
1273 }
1274
cx23885_risc_vbibuffer(struct pci_dev * pci,struct cx23885_riscmem * risc,struct scatterlist * sglist,unsigned int top_offset,unsigned int bottom_offset,unsigned int bpl,unsigned int padding,unsigned int lines)1275 int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1276 struct scatterlist *sglist, unsigned int top_offset,
1277 unsigned int bottom_offset, unsigned int bpl,
1278 unsigned int padding, unsigned int lines)
1279 {
1280 u32 instructions, fields;
1281 __le32 *rp;
1282
1283 fields = 0;
1284 if (UNSET != top_offset)
1285 fields++;
1286 if (UNSET != bottom_offset)
1287 fields++;
1288
1289 /* estimate risc mem: worst case is one write per page border +
1290 one write per scan line + syncs + jump (all 2 dwords). Padding
1291 can cause next bpl to start close to a page border. First DMA
1292 region may be smaller than PAGE_SIZE */
1293 /* write and jump need and extra dword */
1294 instructions = fields * (1 + ((bpl + padding) * lines)
1295 / PAGE_SIZE + lines);
1296 instructions += 5;
1297 risc->size = instructions * 12;
1298 risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
1299 GFP_KERNEL);
1300 if (risc->cpu == NULL)
1301 return -ENOMEM;
1302 /* write risc instructions */
1303 rp = risc->cpu;
1304
1305 /* Sync to line 6, so US CC line 21 will appear in line '12'
1306 * in the userland vbi payload */
1307 if (UNSET != top_offset)
1308 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1309 bpl, padding, lines, 0, true);
1310
1311 if (UNSET != bottom_offset)
1312 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1313 bpl, padding, lines, 0, UNSET == top_offset);
1314
1315
1316
1317 /* save pointer to jmp instruction address */
1318 risc->jmp = rp;
1319 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1320 return 0;
1321 }
1322
1323
cx23885_free_buffer(struct cx23885_dev * dev,struct cx23885_buffer * buf)1324 void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
1325 {
1326 struct cx23885_riscmem *risc = &buf->risc;
1327
1328 dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu, risc->dma);
1329 }
1330
/*
 * Dump (at debug level 1) the bridge-wide and per-port transport stream
 * registers relevant to diagnosing TS DMA problems on @port.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	/* Per-port registers; the register addresses themselves are
	 * printed alongside their values. */
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
	dprintk(1, "%s() ts_int_status(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_stat, cx_read(port->reg_ts_int_stat));
	dprintk(1, "%s() PCI_INT_STAT 0x%08X\n", __func__,
		cx_read(PCI_INT_STAT));
	dprintk(1, "%s() VID_B_INT_MSTAT 0x%08X\n", __func__,
		cx_read(VID_B_INT_MSTAT));
	dprintk(1, "%s() VID_B_INT_SSTAT 0x%08X\n", __func__,
		cx_read(VID_B_INT_SSTAT));
	dprintk(1, "%s() VID_C_INT_MSTAT 0x%08X\n", __func__,
		cx_read(VID_C_INT_MSTAT));
	dprintk(1, "%s() VID_C_INT_SSTAT 0x%08X\n", __func__,
		cx_read(VID_C_INT_SSTAT));
}
1394
/*
 * Program and start transport DMA on @port with the RISC program of the
 * queue head @buf.
 *
 * Sets up the SRAM channel and TS packet length, validates that the board
 * has a DVB-capable transport port, configures the TS pad directions and
 * (for cx23417 boards) the encoder pins/clocks, then unmasks the port
 * interrupts and enables DMA and the RISC controller. The repeated
 * cx23885_clear_bridge_error() calls belong to the periodic DMA reset
 * workaround (see dma_reset_workaround module parameter).
 *
 * Returns 0 on success or -EINVAL when neither port B nor port C is
 * configured for MPEG/DVB transport.
 */
int cx23885_start_dma(struct cx23885_tsport *port,
		      struct cx23885_dmaqueue *q,
		      struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	/* Sanity check: at least one transport port must be in DVB mode. */
	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
	    (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	/* Gate the A/V core clock off while reprogramming encoder boards. */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;    /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* Sets MOE_CLK_DIS to disable MoE clock */
		/* sets MCLK_DLY_SEL/BCLK_DLY_SEL to 1 buffer delay each */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);

		/* ALT_GPIO_ALT_SET: GPIO[0]
		 * IR_ALT_TX_SEL: GPIO[1]
		 * GPIO1_ALT_SEL: VIP_656_DATA[0]
		 * GPIO0_ALT_SEL: VIP_656_CLK
		 */
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	/* Re-enable the A/V clock now that the port is programmed. */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	return 0;
}
1529
cx23885_stop_dma(struct cx23885_tsport * port)1530 static int cx23885_stop_dma(struct cx23885_tsport *port)
1531 {
1532 struct cx23885_dev *dev = port->dev;
1533 u32 reg;
1534 int delay = 0;
1535 uint32_t reg1_val;
1536 uint32_t reg2_val;
1537
1538 dprintk(1, "%s()\n", __func__);
1539
1540 /* Stop interrupts and DMA */
1541 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1542 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1543 /* just in case wait for any dma to complete before allowing dealloc */
1544 mdelay(20);
1545 for (delay = 0; delay < 100; delay++) {
1546 reg1_val = cx_read(TC_REQ);
1547 reg2_val = cx_read(TC_REQ_SET);
1548 if (reg1_val == 0 || reg2_val == 0)
1549 break;
1550 mdelay(1);
1551 }
1552 dev_dbg(&dev->pci->dev, "delay=%d reg1=0x%08x reg2=0x%08x\n",
1553 delay, reg1_val, reg2_val);
1554
1555 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1556 reg = cx_read(PAD_CTRL);
1557
1558 /* Set TS1_OE */
1559 reg = reg | 0x1;
1560
1561 /* clear TS1_SOP_OE and TS1_OE_HI */
1562 reg = reg & ~0xa;
1563 cx_write(PAD_CTRL, reg);
1564 cx_write(port->reg_src_sel, 0);
1565 cx_write(port->reg_gen_ctrl, 8);
1566 }
1567
1568 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1569 cx23885_av_clk(dev, 0);
1570
1571 return 0;
1572 }
1573
1574 /* ------------------------------------------------------------------ */
1575
cx23885_buf_prepare(struct cx23885_buffer * buf,struct cx23885_tsport * port)1576 int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1577 {
1578 struct cx23885_dev *dev = port->dev;
1579 int size = port->ts_packet_size * port->ts_packet_count;
1580 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
1581
1582 dprintk(1, "%s: %p\n", __func__, buf);
1583 if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
1584 return -EINVAL;
1585 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1586
1587 cx23885_risc_databuffer(dev->pci, &buf->risc,
1588 sgt->sgl,
1589 port->ts_packet_size, port->ts_packet_count, 0);
1590 return 0;
1591 }
1592
1593 /*
1594 * The risc program for each buffer works as follows: it starts with a simple
1595 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1596 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1597 * the initial JUMP).
1598 *
1599 * This is the risc program of the first buffer to be queued if the active list
1600 * is empty and it just keeps DMAing this buffer without generating any
1601 * interrupts.
1602 *
1603 * If a new buffer is added then the initial JUMP in the code for that buffer
1604 * will generate an interrupt which signals that the previous buffer has been
1605 * DMAed successfully and that it can be returned to userspace.
1606 *
1607 * It also sets the final jump of the previous buffer to the start of the new
1608 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1609 * atomic u32 write, so there is no race condition.
1610 *
 * The end-result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
1613 */
/*
 * Queue @buf for transport DMA on @port (see the chaining description in
 * the comment block above this function).
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;
	unsigned long flags;

	/* Point the initial NOP-jump and the trailing jump at start+12,
	 * so an unchained buffer simply loops over itself. */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		/* Make our initial jump raise an interrupt (signals the
		 * previous buffer completed), then patch the previous
		 * buffer's trailing jump to chain into this one — a single
		 * u32 store, so there is no race with the DMA engine. */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
1642
1643 /* ----------------------------------------------------------- */
1644
do_cancel_buffers(struct cx23885_tsport * port,char * reason)1645 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1646 {
1647 struct cx23885_dmaqueue *q = &port->mpegq;
1648 struct cx23885_buffer *buf;
1649 unsigned long flags;
1650
1651 spin_lock_irqsave(&port->slock, flags);
1652 while (!list_empty(&q->active)) {
1653 buf = list_entry(q->active.next, struct cx23885_buffer,
1654 queue);
1655 list_del(&buf->queue);
1656 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1657 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1658 buf, buf->vb.vb2_buf.index, reason,
1659 (unsigned long)buf->risc.dma);
1660 }
1661 spin_unlock_irqrestore(&port->slock, flags);
1662 }
1663
/* Quiesce transport DMA on @port, then error out all queued buffers. */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1670
/*
 * Interrupt handler for the cx23417 MPEG encoder path. Decodes error
 * bits (stopping DMA and re-checking the encoder on any of them) or
 * wakes the buffer queue on a normal RISC IRQ1 completion.
 *
 * Returns nonzero when the interrupt was handled (status acked).
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
		(status & VID_B_MSK_OPC_ERR) ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC) ||
		(status & VID_B_MSK_VBI_SYNC) ||
		(status & VID_B_MSK_OF) ||
		(status & VID_B_MSK_VBI_OF)) {
		pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
		       dev->name, status);
		/* Decode each error bit individually for the debug log. */
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		/* Stop DMA on error and verify the encoder still runs. */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		/* Normal completion: a buffer finished; wake the queue. */
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* Ack whatever we saw. */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1726
/*
 * Interrupt handler for a DVB transport port. On RISC opcode/sync/
 * overflow errors the port DMA is stopped and the SRAM channel dumped;
 * on a normal RISC IRQ1 the buffer queue is woken.
 *
 * Returns nonzero when the interrupt was handled (status acked).
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		/* Decode each error bit individually for the debug log. */
		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		pr_err("%s: mpeg risc op code error\n", dev->name);

		/* Stop DMA on error so the RISC engine cannot run wild. */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		/* Normal completion: a buffer finished; wake the queue. */
		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* Ack whatever we saw. */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1777
/*
 * Top-level interrupt handler for the bridge. Reads and decodes the
 * per-block status registers, dispatches to the TS/video/audio/IR
 * sub-handlers and finally acks the masked PCI status bits when any
 * work was done.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	/* Fast path: nothing we care about is pending (shared IRQ line). */
	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	if ((pci_status & pci_mask) == 0) {
		dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
			pci_status, pci_mask);
		goto out;
	}

	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	if (((pci_status & pci_mask) == 0) &&
		((ts2_status & ts2_mask) == 0) &&
		((ts1_status & ts1_mask) == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* Decode the individual PCI-level status bits for the debug log. */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CAM/CI interrupt routing depends on the board's CI controller. */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	/* Transport port B: DVB or encoder, depending on board config. */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	/* Transport port C: likewise. */
	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	/* Forward IR interrupts to the IR subdevice's ISR hook. */
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* A/V core interrupts are serviced from process context. */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status & pci_mask);
out:
	return IRQ_RETVAL(handled);
}
1942
cx23885_v4l2_dev_notify(struct v4l2_subdev * sd,unsigned int notification,void * arg)1943 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1944 unsigned int notification, void *arg)
1945 {
1946 struct cx23885_dev *dev;
1947
1948 if (sd == NULL)
1949 return;
1950
1951 dev = to_cx23885(sd->v4l2_dev);
1952
1953 switch (notification) {
1954 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1955 if (sd == dev->sd_ir)
1956 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1957 break;
1958 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1959 if (sd == dev->sd_ir)
1960 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1961 break;
1962 }
1963 }
1964
cx23885_v4l2_dev_notify_init(struct cx23885_dev * dev)1965 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1966 {
1967 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1968 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1969 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1970 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1971 }
1972
encoder_on_portb(struct cx23885_dev * dev)1973 static inline int encoder_on_portb(struct cx23885_dev *dev)
1974 {
1975 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1976 }
1977
encoder_on_portc(struct cx23885_dev * dev)1978 static inline int encoder_on_portc(struct cx23885_dev *dev)
1979 {
1980 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1981 }
1982
/* Mask represents 32 different GPIOs, GPIOs are split into multiple
 * registers depending on the board configuration (and whether the
 * 417 encoder (with its own GPIOs) is present. Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 through  0 - On the cx23885 bridge
 * GPIO 18 through  3 - On the cx23417 host bus interface
 * GPIO 23 through 19 - On the cx25840 a/v core
 */
/*
 * Assert (drive high) the GPIO lines selected by @mask, writing each
 * bit to whichever hardware register physically hosts it.
 */
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	u32 bridge_bits = mask & 0x00000007;	/* GPIO 2-0: bridge */
	u32 hostbus_bits = mask & 0x0007fff8;	/* GPIO 18-3: cx23417 bus */

	if (bridge_bits)
		cx_set(GP0_IO, bridge_bits);

	if (hostbus_bits) {
		/* These lines double as the encoder host bus; warn if an
		 * encoder is configured on either port. */
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Setting GPIO on encoder ports\n",
				dev->name);
		cx_set(MC417_RWD, hostbus_bits >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}
2011
/*
 * Deassert (drive low) the GPIO lines selected by @mask, clearing each
 * bit in whichever hardware register physically hosts it.
 */
void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	u32 bridge_bits = mask & 0x00000007;	/* GPIO 2-0: bridge */
	u32 hostbus_bits = mask & 0x0007fff8;	/* GPIO 18-3: cx23417 bus */

	if (bridge_bits)
		cx_clear(GP0_IO, bridge_bits);

	if (hostbus_bits) {
		/* These lines double as the encoder host bus; warn if an
		 * encoder is configured on either port. */
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Clearing GPIO moving on encoder ports\n",
				dev->name);
		cx_clear(MC417_RWD, hostbus_bits >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}
2028
/*
 * Read back the GPIO lines selected by @mask. Note: bridge GPIOs take
 * priority — if any of bits 2-0 are requested, only those are returned.
 */
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	u32 bridge_bits = mask & 0x00000007;	/* GPIO 2-0: bridge */
	u32 hostbus_bits = mask & 0x0007fff8;	/* GPIO 18-3: cx23417 bus */

	/* GP0_IO holds the input levels in bits 10:8 */
	if (bridge_bits)
		return (cx_read(GP0_IO) >> 8) & bridge_bits;

	if (hostbus_bits) {
		/* These lines double as the encoder host bus; warn if an
		 * encoder is configured on either port. */
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & (hostbus_bits >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);

	return 0;
}
2047
/*
 * Configure the direction of the GPIO lines in @mask:
 * @asoutput non-zero makes them outputs, zero makes them inputs.
 */
void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	u32 bridge_bits = mask & 0x00000007;	/* GPIO 2-0: bridge */
	u32 hostbus_bits = mask & 0x0007fff8;	/* GPIO 18-3: cx23417 bus */

	/* GP0_IO direction controls live in bits 18:16 */
	if (bridge_bits) {
		if (asoutput)
			cx_set(GP0_IO, bridge_bits << 16);
		else
			cx_clear(GP0_IO, bridge_bits << 16);
	}

	/* These lines double as the encoder host bus; warn if an encoder
	 * is configured on either port. */
	if (hostbus_bits && (encoder_on_portb(dev) || encoder_on_portc(dev)))
		pr_err("%s: Enabling GPIO on encoder ports\n",
			dev->name);

	/* MC417_OEN is active low for output, write 1 for an input */
	if (hostbus_bits) {
		if (asoutput)
			cx_clear(MC417_OEN, hostbus_bits >> 3);
		else
			cx_set(MC417_OEN, hostbus_bits >> 3);
	}

	/* TODO: 23-19 */
}
2070
/*
 * IOMMU PCI IDs whose presence marks a platform known to need the
 * periodic RiSC DMA engine reset workaround; scanned by
 * cx23885_does_need_dma_reset() when dma_reset_workaround == 1.
 */
static struct {
	int vendor, dev;
} const broken_dev_id[] = {
	/* According with
	 * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
	 * 0x1451 is PCI ID for the IOMMU found on Ryzen
	 */
	{ PCI_VENDOR_ID_AMD, 0x1451 },
	/* According to sudo lspci -nn,
	 * 0x1423 is the PCI ID for the IOMMU found on Kaveri
	 */
	{ PCI_VENDOR_ID_AMD, 0x1423 },
	/* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
	 */
	{ PCI_VENDOR_ID_AMD, 0x1481 },
	/* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
	 */
	{ PCI_VENDOR_ID_AMD, 0x1419 },
	/* 0x1631 is the PCI ID for the IOMMU found on Renoir/Cezanne
	 */
	{ PCI_VENDOR_ID_AMD, 0x1631 },
	/* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
	 */
	{ PCI_VENDOR_ID_ATI, 0x5a23 },
};
2096
cx23885_does_need_dma_reset(void)2097 static bool cx23885_does_need_dma_reset(void)
2098 {
2099 int i;
2100 struct pci_dev *pdev = NULL;
2101
2102 if (dma_reset_workaround == 0)
2103 return false;
2104 else if (dma_reset_workaround == 2)
2105 return true;
2106
2107 for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
2108 pdev = pci_get_device(broken_dev_id[i].vendor,
2109 broken_dev_id[i].dev, NULL);
2110 if (pdev) {
2111 pci_dev_put(pdev);
2112 return true;
2113 }
2114 }
2115 return false;
2116 }
2117
/*
 * PCI probe callback: allocate, register and bring up one cx23885/7
 * bridge device.
 *
 * Error unwinding walks the goto labels in reverse setup order.
 * NOTE(review): on the cx23885_dev_setup() failure path we jump to
 * fail_ctrl without calling pci_disable_device(); confirm whether
 * cx23885_dev_unregister()/the PCI core covers that case.
 */
static int cx23885_initdev(struct pci_dev *pci_dev,
			   const struct pci_device_id *pci_id)
{
	struct cx23885_dev *dev;
	struct v4l2_ctrl_handler *hdl;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (NULL == dev)
		return -ENOMEM;

	/* Decide up front whether the periodic RiSC DMA reset workaround
	 * applies on this platform (module option and/or IOMMU detect). */
	dev->need_dma_reset = cx23885_does_need_dma_reset();

	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
	if (err < 0)
		goto fail_free;

	hdl = &dev->ctrl_handler;
	v4l2_ctrl_handler_init(hdl, 6);
	if (hdl->error) {
		err = hdl->error;
		goto fail_ctrl;
	}
	dev->v4l2_dev.ctrl_handler = hdl;

	/* Prepare to handle notifications from subdevices */
	cx23885_v4l2_dev_notify_init(dev);

	/* pci init */
	dev->pci = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto fail_ctrl;
	}

	if (cx23885_dev_setup(dev) < 0) {
		err = -EINVAL;
		goto fail_ctrl;
	}

	/* print pci info */
	dev->pci_rev = pci_dev->revision;
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
	pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
		dev->name,
		pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
		dev->pci_lat,
		(unsigned long long)pci_resource_start(pci_dev, 0));

	pci_set_master(pci_dev);
	/* The bridge only supports 32-bit DMA addressing */
	err = dma_set_mask(&pci_dev->dev, 0xffffffff);
	if (err) {
		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		goto fail_dma_set_mask;
	}

	err = request_irq(pci_dev->irq, cx23885_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		pr_err("%s: can't get IRQ %d\n",
		       dev->name, pci_dev->irq);
		goto fail_dma_set_mask;
	}

	/* Board-specific extra GPIO interrupt sources */
	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
		break;
	}

	/*
	 * The CX2388[58] IR controller can start firing interrupts when
	 * enabled, so these have to take place after the cx23885_irq() handler
	 * is hooked up by the call to request_irq() above.
	 */
	cx23885_ir_pci_int_enable(dev);
	cx23885_input_init(dev);

	return 0;

fail_dma_set_mask:
	cx23885_dev_unregister(dev);
fail_ctrl:
	v4l2_ctrl_handler_free(hdl);
	v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
	kfree(dev);
	return err;
}
2210
/*
 * PCI remove callback: tear one device down, roughly in reverse order
 * of cx23885_initdev(). Input/IR are stopped first, then the hardware
 * is quiesced before the IRQ line is released.
 */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2231
/* PCI IDs this driver binds to (Conexant vendor 0x14f1); exported via
 * MODULE_DEVICE_TABLE so hotplug/modprobe can match the module. */
static const struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2250
/* PCI driver glue: probe/remove wired to init/fini above. */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
};
2257
cx23885_init(void)2258 static int __init cx23885_init(void)
2259 {
2260 pr_info("cx23885 driver version %s loaded\n",
2261 CX23885_VERSION);
2262 return pci_register_driver(&cx23885_pci_driver);
2263 }
2264
cx23885_fini(void)2265 static void __exit cx23885_fini(void)
2266 {
2267 pci_unregister_driver(&cx23885_pci_driver);
2268 }
2269
2270 module_init(cx23885_init);
2271 module_exit(cx23885_fini);
2272