1 /*
2 * Driver for the Conexant CX23885 PCIe bridge
3 *
4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22 #include <linux/init.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kmod.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/delay.h>
31 #include <asm/div64.h>
32 #include <linux/firmware.h>
33
34 #include "cx23885.h"
35 #include "cimax2.h"
36 #include "altera-ci.h"
37 #include "cx23888-ir.h"
38 #include "cx23885-ir.h"
39 #include "cx23885-av.h"
40 #include "cx23885-input.h"
41
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");

/* Module option: verbosity level for dprintk(); higher is chattier. */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/* Module option: per-device board type override, indexed by device number. */
static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

/* Print only when the 'debug' module parameter is >= 'level'.
 * Relies on a 'dev' pointer being in scope at the call site. */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
	} while (0)
58
/* Number of cx23885 devices set up so far; used to number/name devices. */
static unsigned int cx23885_devcount;

/* Sentinel 'sync_line' value: build the RISC program without a resync
 * instruction (see cx23885_risc_field()). */
#define NO_SYNC_LINE (-1U)
62
/* FIXME, these allocations will change when
 * analog arrives.  To be reviewed.
65 * CX23887 Assumptions
66 * 1 line = 16 bytes of CDT
67 * cmds size = 80
68 * cdt size = 16 * linesize
69 * iqsize = 64
70 * maxlines = 6
71 *
72 * Address Space:
73 * 0x00000000 0x00008fff FIFO clusters
74 * 0x00010000 0x000104af Channel Management Data Structures
75 * 0x000104b0 0x000104ff Free
76 * 0x00010500 0x000108bf 15 channels * iqsize
77 * 0x000108c0 0x000108ff Free
78 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
79 * 15 channels * (iqsize + (maxlines * linesize))
80 * 0x00010ea0 0x00010xxx Free
81 */
82
/*
 * SRAM channel map for the cx23885 bridge.  Only VID A (analog video),
 * TS1 B and TS2 C carry real FIFO/CDT allocations; the remaining
 * entries are placeholders with cmds_start == 0, which
 * cx23885_sram_channel_setup() treats as "erase/disable channel".
 */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name = "VID A",
		.cmds_start = 0x10000,
		.ctrl_start = 0x10380,
		.cdt = 0x104c0,
		.fifo_start = 0x40,
		.fifo_size = 0x2800,
		.ptr1_reg = DMA1_PTR1,
		.ptr2_reg = DMA1_PTR2,
		.cnt1_reg = DMA1_CNT1,
		.cnt2_reg = DMA1_CNT2,
	},
	[SRAM_CH02] = {
		/* unused placeholder */
		.name = "ch2",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA2_PTR1,
		.ptr2_reg = DMA2_PTR2,
		.cnt1_reg = DMA2_CNT1,
		.cnt2_reg = DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name = "TS1 B",
		.cmds_start = 0x100A0,
		.ctrl_start = 0x10400,
		.cdt = 0x10580,
		.fifo_start = 0x5000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA3_PTR1,
		.ptr2_reg = DMA3_PTR2,
		.cnt1_reg = DMA3_CNT1,
		.cnt2_reg = DMA3_CNT2,
	},
	[SRAM_CH04] = {
		/* unused placeholder */
		.name = "ch4",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA4_PTR1,
		.ptr2_reg = DMA4_PTR2,
		.cnt1_reg = DMA4_CNT1,
		.cnt2_reg = DMA4_CNT2,
	},
	[SRAM_CH05] = {
		/* unused placeholder */
		.name = "ch5",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name = "TS2 C",
		.cmds_start = 0x10140,
		.ctrl_start = 0x10440,
		.cdt = 0x105e0,
		.fifo_start = 0x6000,
		.fifo_size = 0x1000,
		/* NOTE(review): CH06 shares the DMA5 register set with
		 * CH05 (not DMA6) -- confirm this is intentional. */
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH07] = {
		/* unused placeholder */
		.name = "ch7",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA6_PTR1,
		.ptr2_reg = DMA6_PTR2,
		.cnt1_reg = DMA6_CNT1,
		.cnt2_reg = DMA6_CNT2,
	},
	[SRAM_CH08] = {
		/* unused placeholder */
		.name = "ch8",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA7_PTR1,
		.ptr2_reg = DMA7_PTR2,
		.cnt1_reg = DMA7_CNT1,
		.cnt2_reg = DMA7_CNT2,
	},
	[SRAM_CH09] = {
		/* unused placeholder */
		.name = "ch9",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA8_PTR1,
		.ptr2_reg = DMA8_PTR2,
		.cnt1_reg = DMA8_CNT1,
		.cnt2_reg = DMA8_CNT2,
	},
};
193
/*
 * SRAM channel map for the cx23887/8 bridge.  Same layout as the
 * cx23885 table but with different CMDS/CDT base addresses; only
 * VID A, TS1 B and TS2 C are backed by real FIFO/CDT space.
 */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name = "VID A",
		.cmds_start = 0x10000,
		.ctrl_start = 0x105b0,
		.cdt = 0x107b0,
		.fifo_start = 0x40,
		.fifo_size = 0x2800,
		.ptr1_reg = DMA1_PTR1,
		.ptr2_reg = DMA1_PTR2,
		.cnt1_reg = DMA1_CNT1,
		.cnt2_reg = DMA1_CNT2,
	},
	[SRAM_CH02] = {
		/* unused placeholder */
		.name = "ch2",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA2_PTR1,
		.ptr2_reg = DMA2_PTR2,
		.cnt1_reg = DMA2_CNT1,
		.cnt2_reg = DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name = "TS1 B",
		.cmds_start = 0x100A0,
		.ctrl_start = 0x10630,
		.cdt = 0x10870,
		.fifo_start = 0x5000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA3_PTR1,
		.ptr2_reg = DMA3_PTR2,
		.cnt1_reg = DMA3_CNT1,
		.cnt2_reg = DMA3_CNT2,
	},
	[SRAM_CH04] = {
		/* unused placeholder */
		.name = "ch4",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA4_PTR1,
		.ptr2_reg = DMA4_PTR2,
		.cnt1_reg = DMA4_CNT1,
		.cnt2_reg = DMA4_CNT2,
	},
	[SRAM_CH05] = {
		/* unused placeholder */
		.name = "ch5",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name = "TS2 C",
		.cmds_start = 0x10140,
		.ctrl_start = 0x10670,
		.cdt = 0x108d0,
		.fifo_start = 0x6000,
		.fifo_size = 0x1000,
		/* NOTE(review): CH06 shares the DMA5 register set with
		 * CH05 (not DMA6) -- confirm this is intentional. */
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH07] = {
		/* unused placeholder */
		.name = "ch7",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA6_PTR1,
		.ptr2_reg = DMA6_PTR2,
		.cnt1_reg = DMA6_CNT1,
		.cnt2_reg = DMA6_CNT2,
	},
	[SRAM_CH08] = {
		/* unused placeholder */
		.name = "ch8",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA7_PTR1,
		.ptr2_reg = DMA7_PTR2,
		.cnt1_reg = DMA7_CNT1,
		.cnt2_reg = DMA7_CNT2,
	},
	[SRAM_CH09] = {
		/* unused placeholder */
		.name = "ch9",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA8_PTR1,
		.ptr2_reg = DMA8_PTR2,
		.cnt1_reg = DMA8_CNT1,
		.cnt2_reg = DMA8_CNT2,
	},
};
304
/*
 * Register 'mask' as interrupt sources the driver manages.  The hardware
 * enable register is not touched here; see cx23885_irq_add_enable().
 */
void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irqflags);
	dev->pci_irqmask |= mask;
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irqflags);
}
314
/*
 * Register 'mask' as managed interrupt sources and enable them in the
 * hardware PCI interrupt mask register in one locked step.
 */
void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irqflags);
	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irqflags);
}
325
/*
 * Enable the subset of 'mask' that was previously registered via
 * cx23885_irq_add(); unregistered bits are silently ignored.
 */
void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irqflags;
	u32 enable;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irqflags);
	enable = mask & dev->pci_irqmask;
	if (enable)
		cx_set(PCI_INT_MSK, enable);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irqflags);
}
338
/* Enable every interrupt source previously registered via cx23885_irq_add(). */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
343
/*
 * Disable 'mask' in the hardware PCI interrupt mask register without
 * unregistering the bits from the managed set.
 */
void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irqflags);
	cx_clear(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irqflags);
}
353
/* Disable every PCI interrupt source in hardware (managed set untouched). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
358
/*
 * Unregister 'mask' from the managed interrupt set and disable the
 * corresponding bits in hardware.
 */
void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, irqflags);
	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, irqflags);
}
369
cx23885_irq_get_mask(struct cx23885_dev * dev)370 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
371 {
372 u32 v;
373 unsigned long flags;
374 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
375
376 v = cx_read(PCI_INT_MSK);
377
378 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
379 return v;
380 }
381
/*
 * Decode and print one RISC instruction word for debugging.
 * Prints the opcode mnemonic, any set attribute bits (12..27) and the
 * 12-bit byte count, then returns the instruction length in dwords
 * (minimum 1) so callers can step through a program.
 */
static int cx23885_risc_decode(u32 risc)
{
	/* Opcode mnemonics indexed by the top nibble of the instruction. */
	static char *instr[16] = {
		[RISC_SYNC >> 28] = "sync",
		[RISC_WRITE >> 28] = "write",
		[RISC_WRITEC >> 28] = "writec",
		[RISC_READ >> 28] = "read",
		[RISC_READC >> 28] = "readc",
		[RISC_JUMP >> 28] = "jump",
		[RISC_SKIP >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	/* Instruction length in dwords per opcode; zero entries decode as 1. */
	static int incr[16] = {
		[RISC_WRITE >> 28] = 3,
		[RISC_JUMP >> 28] = 3,
		[RISC_SKIP >> 28] = 1,
		[RISC_SYNC >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	/* Names for attribute bits 12..27, lowest bit first. */
	static char *bits[] = {
		"12", "13", "14", "resync",
		"cnt0", "cnt1", "18", "19",
		"20", "21", "22", "23",
		"irq1", "irq2", "eol", "sol",
	};
	int i;

	printk("0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	/* Walk attribute bits from MSB to LSB so output reads sol..12. */
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			printk(" %s", bits[i]);
	printk(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
421
/*
 * cx23885_wakeup() - complete finished buffers on a TS port dma queue.
 * @port:  transport stream port owning the queue
 * @q:     dma queue whose 'active' list is scanned
 * @count: 16-bit hardware counter identifying the last completed buffer
 *
 * Pops buffers from the head of the active list whose sequence number
 * has been passed by 'count', marks them VIDEOBUF_DONE and wakes their
 * waiters, then re-arms or cancels the queue timeout.
 */
void cx23885_wakeup(struct cx23885_tsport *port,
		    struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;
	int bc;

	for (bc = 0;; bc++) {
		if (list_empty(&q->active))
			break;
		buf = list_entry(q->active.next,
				 struct cx23885_buffer, vb.queue);

		/* count comes from the hw and is 16bit wide --
		 * this trick handles wrap-arounds correctly for
		 * up to 32767 buffers in flight... */
		if ((s16) (count - buf->count) < 0)
			break;

		do_gettimeofday(&buf->vb.ts);
		dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
			count, buf->count);
		buf->vb.state = VIDEOBUF_DONE;
		list_del(&buf->vb.queue);
		wake_up(&buf->vb.done);
	}
	/* Cancel the timeout when the queue drained, otherwise push it out. */
	if (list_empty(&q->active))
		del_timer(&q->timeout);
	else
		mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
	/* Normally exactly one buffer completes per interrupt. */
	if (bc != 1)
		printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
		       __func__, bc);
}
456
/*
 * cx23885_sram_channel_setup() - program a DMA channel's SRAM structures.
 * @ch:   SRAM channel description (addresses and register offsets)
 * @bpl:  requested bytes per line; rounded up to an 8-byte boundary here
 * @risc: address written as the channel's initial RISC PC (0 for none)
 *
 * A channel with cmds_start == 0 is unused and simply has its DMA
 * pointer/count registers cleared.  Otherwise the cluster descriptor
 * table (CDT), command structure (CMDS) and channel registers are
 * programmed.  Always returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl = (bpl + 7) & ~7; /* alignment */
	cdt = ch->cdt;
	/* Clamp to at most 6 CDT lines (hardware assumption, see header
	 * comment) and insist on at least 2. */
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	/* Park a jump-to-self instruction at SRAM address 8; jumponly
	 * channels point their CMDS at it. */
	cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	cx_write(8 + 4, 8);
	cx_write(8 + 8, 0);

	/* write CDT: one 16-byte descriptor per FIFO line */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i + 4, 0);
		cx_write(cdt + 16*i + 8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS: initial RISC PC, CDT base/size, IQ base/size */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start + 8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	/* Zero the remainder of the 80-byte CMDS block. */
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
528
/*
 * cx23885_sram_channel_dump() - dump a channel's CMDS block, instruction
 * queue and DMA pointer/count registers to the kernel log, decoding any
 * RISC instruction words encountered.  Debug aid only.
 */
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	/* Field names of the 14 dwords at the start of the CMDS block. */
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
	       dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
		       dev->name, name[i],
		       cx_read(ch->cmds_start + 4*i));

	/* The four dwords after the named fields hold RISC instructions. */
	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	/* Walk the 64-byte instruction queue, stepping by decoded length. */
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
		       ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
			       dev->name, i+j, risc, j);
		}
	}

	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr1_reg));
	printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr2_reg));
	printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt1_reg));
	printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt2_reg));
}
590
/*
 * cx23885_risc_disasm() - disassemble a RISC program in host memory to
 * the kernel log.  Stops at a bare RISC_JUMP instruction or at the end
 * of the buffer.  Debug aid only.
 */
static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct btcx_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
	       dev->name, risc->cpu, (unsigned long)risc->dma);
	/* Step through the program by each instruction's decoded length. */
	for (i = 0; i < (risc->size >> 2); i += n) {
		printk(KERN_INFO "%s: %04d: ", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
			       dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}
609
/*
 * cx23885_shutdown() - quiesce the bridge: stop the RISC controller,
 * all DMA engines, the serial port and every interrupt source.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
639
/*
 * cx23885_reset() - shut the bridge down, clear all latched interrupt
 * status, then reprogram every SRAM channel and the GPIOs to a known
 * state.  Unused channels are set up with small dummy line sizes.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Acknowledge/clear any pending interrupt status. */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* Allow the hardware to settle before reprogramming SRAM. */
	mdelay(100);

	/* VID A uses 720*4 bytes/line; TS ports use 188*4 (4 TS packets). */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
				   720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
				   188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
				   188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
672
673
cx23885_pci_quirks(struct cx23885_dev * dev)674 static int cx23885_pci_quirks(struct cx23885_dev *dev)
675 {
676 dprintk(1, "%s()\n", __func__);
677
678 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
679 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
680 * occur on the cx23887 bridge.
681 */
682 if (dev->bridge == CX23885_BRIDGE_885)
683 cx_clear(RDR_TLCTL0, 1 << 4);
684
685 return 0;
686 }
687
get_resources(struct cx23885_dev * dev)688 static int get_resources(struct cx23885_dev *dev)
689 {
690 if (request_mem_region(pci_resource_start(dev->pci, 0),
691 pci_resource_len(dev->pci, 0),
692 dev->name))
693 return 0;
694
695 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
696 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
697
698 return -EBUSY;
699 }
700
701 static void cx23885_timeout(unsigned long data);
702 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
703 u32 reg, u32 mask, u32 value);
704
/*
 * cx23885_init_tsport() - initialise a transport stream port structure.
 * @portno: 1 selects the VID_B register set (TS1), 2 selects VID_C
 *          (TS2); any other value is a BUG.
 *
 * Fills in per-port register offsets, default control values, dma queue
 * state, the frontend list, and builds the port's RISC "stopper"
 * program.  Always returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue - Common settings */
	port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val = 0x0;
	/* 0x47 = MPEG TS sync byte; 188 = TS packet length */
	port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	INIT_LIST_HEAD(&port->mpegq.queued);
	port->mpegq.timeout.function = cx23885_timeout;
	port->mpegq.timeout.data = (unsigned long)port;
	init_timer(&port->mpegq.timeout);

	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	/* Map the port number to its VID_B/VID_C register block. */
	switch (portno) {
	case 1:
		port->reg_gpcnt = VID_B_GPCNT;
		port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
		port->reg_dma_ctl = VID_B_DMA_CTL;
		port->reg_lngth = VID_B_LNGTH;
		port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_B_GEN_CTL;
		port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
		port->reg_sop_status = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_B_VLD_MISC;
		port->reg_ts_clk_en = VID_B_TS_CLK_EN;
		port->reg_src_sel = VID_B_SRC_SEL;
		port->reg_ts_int_msk = VID_B_INT_MSK;
		port->reg_ts_int_stat = VID_B_INT_STAT;
		port->sram_chno = SRAM_CH03; /* VID_B */
		port->pci_irqmask = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt = VID_C_GPCNT;
		port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
		port->reg_dma_ctl = VID_C_DMA_CTL;
		port->reg_lngth = VID_C_LNGTH;
		port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_C_GEN_CTL;
		port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
		port->reg_sop_status = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_C_VLD_MISC;
		port->reg_ts_clk_en = VID_C_TS_CLK_EN;
		port->reg_src_sel = 0;	/* VID_C has no source select */
		port->reg_ts_int_msk = VID_C_INT_MSK;
		port->reg_ts_int_stat = VID_C_INT_STAT;
		port->sram_chno = SRAM_CH06; /* VID_C */
		port->pci_irqmask = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	/* Build the RISC program that parks DMA when the port stops. */
	cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
			     port->reg_dma_ctl, port->dma_ctl_val, 0x00);

	return 0;
}
783
cx23885_dev_checkrevision(struct cx23885_dev * dev)784 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
785 {
786 switch (cx_read(RDR_CFG2) & 0xff) {
787 case 0x00:
788 /* cx23885 */
789 dev->hwrevision = 0xa0;
790 break;
791 case 0x01:
792 /* CX23885-12Z */
793 dev->hwrevision = 0xa1;
794 break;
795 case 0x02:
796 /* CX23885-13Z/14Z */
797 dev->hwrevision = 0xb0;
798 break;
799 case 0x03:
800 if (dev->pci->device == 0x8880) {
801 /* CX23888-21Z/22Z */
802 dev->hwrevision = 0xc0;
803 } else {
804 /* CX23885-14Z */
805 dev->hwrevision = 0xa4;
806 }
807 break;
808 case 0x04:
809 if (dev->pci->device == 0x8880) {
810 /* CX23888-31Z */
811 dev->hwrevision = 0xd0;
812 } else {
813 /* CX23885-15Z, CX23888-31Z */
814 dev->hwrevision = 0xa5;
815 }
816 break;
817 case 0x0e:
818 /* CX23887-15Z */
819 dev->hwrevision = 0xc0;
820 break;
821 case 0x0f:
822 /* CX23887-14Z */
823 dev->hwrevision = 0xb1;
824 break;
825 default:
826 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
827 __func__, dev->hwrevision);
828 }
829 if (dev->hwrevision)
830 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
831 __func__, dev->hwrevision);
832 else
833 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
834 __func__, dev->hwrevision);
835 }
836
837 /* Find the first v4l2_subdev member of the group id in hw */
/*
 * cx23885_find_hw() - return the first registered v4l2_subdev whose
 * grp_id equals @hw, or NULL if none matches.  The v4l2_dev lock
 * protects the subdev list during the scan.
 */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *result = NULL;
	struct v4l2_subdev *sd;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id == hw) {
			result = sd;
			break;
		}
	}
	spin_unlock(&dev->v4l2_dev.lock);
	return result;
}
853
/*
 * cx23885_dev_setup() - one-time bring-up of a probed device.
 *
 * Detects the bridge type, selects the SRAM map and clock frequency,
 * identifies the board (insmod override, then PCI subsystem id), maps
 * BAR0, resets the hardware, then registers the i2c buses, IR, analog
 * video, DVB and/or cx23417 encoder subsystems as the board profile
 * dictates.
 *
 * Returns 0 on success, -ENODEV when the MMIO region cannot be
 * reserved.
 */
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume a default */
		dev->bridge = CX23885_BRIDGE_887;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 25000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config: insmod 'card=' override first, then PCI
	 * subsystem id autodetection, finally UNKNOWN */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	/* If the user specified a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	dev->pci_bus = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	/* Initialise the TS port structures for ports the board uses. */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		printk(KERN_ERR "CORE %s No more PCIe resources for "
		       "subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff: map BAR0 for register access */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
	       dev->name, dev->pci->subsystem_vendor,
	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
	       dev->board, card[dev->nr] == dev->board ?
	       "insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, core, s_power, 0);
	cx23885_ir_init(dev);

	/* Register analog video on VID_A if the board provides it. */
	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			printk(KERN_ERR "%s() Failed to register analog "
			       "video adapters on VID_A\n", __func__);
		}
	}

	/* Register DVB or the 417 encoder on VID_B. */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
			       "%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	/* Register DVB or the 417 encoder on VID_C. */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			printk(KERN_ERR
			       "%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
			       "%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	return 0;
}
1050
/*
 * cx23885_dev_unregister() - release the MMIO region and, when the last
 * reference is dropped, tear down every subsystem registered by
 * cx23885_dev_setup() and unmap the registers.
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	/* The MMIO region is released unconditionally; the remaining
	 * teardown runs only on the final reference. */
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* Unregister i2c buses in reverse order of registration. */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1080
/*
 * Emit the RISC write program for one video field into @rp.
 *
 * @rp:        current write position in the RISC program buffer
 * @sglist:    DMA scatter-gather list describing the destination buffer
 * @offset:    byte offset of this field's first line within the buffer
 * @sync_line: RESYNC line number, or NO_SYNC_LINE to omit the sync opcode
 * @bpl:       bytes per line to transfer
 * @padding:   bytes to skip between consecutive lines
 * @lines:     number of scan lines to emit
 *
 * Each scan line becomes one WRITE instruction when it fits inside a
 * single S/G chunk, or a SOL.../EOL instruction chain when it straddles
 * chunk boundaries. Instructions are 3 dwords (opcode, addr lo, addr hi);
 * the high address dword is always 0 here (32-bit DMA).
 *
 * Returns the advanced program pointer (one past the last dword written).
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines)
{
	struct scatterlist *sg;
	unsigned int line, todo;

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* Advance past S/G chunks wholly consumed by the offset. */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg++;
		}
		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			/* First partial write finishes the current chunk
			 * (SOL only, no EOL yet). */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg++;
			/* Middle chunks: full-length writes, no SOL/EOL. */
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg++;
			}
			/* Final partial write carries EOL for the line. */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1134
/*
 * Build a complete RISC program for an interlaced (or single-field) video
 * buffer: allocate the program memory, emit one field program per valid
 * offset (top at sync line 0, bottom at sync line 0x200), and record the
 * position where the trailing JUMP instruction must be patched in later.
 *
 * top_offset / bottom_offset may be UNSET to skip that field.
 * Returns 0 on success or the negative error from btcx_riscmem_alloc().
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;
	int rc;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Padding
	   can cause next bpl to start close to a page border. First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 2;
	/* 12 bytes per instruction: opcode + 64-bit address */
	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
	if (rc < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	/* Sanity-check that the jump (2 dwords) still fits the allocation. */
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1176
/*
 * Build a RISC program for a raw MPEG/TS data buffer: a single field
 * program with no sync instruction and no inter-line padding, plus room
 * for the trailing JUMP that cx23885_buf_queue() patches in.
 *
 * Returns 0 on success or the negative error from btcx_riscmem_alloc().
 */
static int cx23885_risc_databuffer(struct pci_dev *pci,
				   struct btcx_riscmem *risc,
				   struct scatterlist *sglist,
				   unsigned int bpl,
				   unsigned int lines)
{
	u32 instructions;
	__le32 *rp;
	int rc;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Here
	   there is no padding and no sync. First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 1;

	/* 12 bytes per instruction: opcode + 64-bit address */
	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
	if (rc < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	/* Sanity-check that the jump (2 dwords) still fits the allocation. */
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1208
/*
 * Build the "stopper" RISC program: a masked register write (raising IRQ2)
 * followed by a jump back to itself, so the DMA engine parks here until a
 * buffer program is chained in front of it.
 *
 * Returns 0 on success or the negative error from btcx_riscmem_alloc().
 */
int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
			 u32 reg, u32 mask, u32 value)
{
	__le32 *prog;
	int rc;

	rc = btcx_riscmem_alloc(pci, risc, 4*16);
	if (rc < 0)
		return rc;

	/* Lay the seven program dwords down by index. */
	prog = risc->cpu;
	prog[0] = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
	prog[1] = cpu_to_le32(reg);
	prog[2] = cpu_to_le32(value);
	prog[3] = cpu_to_le32(mask);
	prog[4] = cpu_to_le32(RISC_JUMP);
	prog[5] = cpu_to_le32(risc->dma);	/* jump target: ourselves */
	prog[6] = cpu_to_le32(0); /* bits 63-32 */
	return 0;
}
1230
/*
 * Release everything attached to a videobuf buffer: wait for any pending
 * DMA on it, unmap and free its S/G mapping, free its RISC program, and
 * reset the buffer state so it can be re-prepared.
 * Must be called from process context (sleeps).
 */
void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);

	BUG_ON(in_interrupt());
	/* Block until the hardware is finished with this buffer. */
	videobuf_waiton(q, &buf->vb, 0, 0);
	videobuf_dma_unmap(q->dev, dma);
	videobuf_dma_free(dma);
	btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
1242
/*
 * Debug helper: dump the bridge-wide and per-TS-port registers relevant
 * to transport-stream DMA. Output only appears at debug level >= 1.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	/* Per-port registers below; addresses differ per TS port, so the
	 * register offset is printed alongside each value. */
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1294
/*
 * Program and start transport-stream DMA on @port for buffer @buf.
 *
 * Sequence: stop any running DMA, set up the SRAM channel FIFO and point
 * it at the buffer's RISC program, configure the port's TS interface
 * registers, switch pad directions for the active configuration, then
 * enable interrupts and the DMA/RISC engines.
 *
 * Returns 0 on success, -EINVAL if neither port B nor port C is
 * configured for DVB on this board.
 */
static int cx23885_start_dma(struct cx23885_tsport *port,
			     struct cx23885_dmaqueue *q,
			     struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		buf->vb.width, buf->vb.height, buf->vb.field);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, buf->vb.width);

	/* NOTE(review): printk below lacks a KERN_ level prefix. */
	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	/* Gate off the A/V core clock while reconfiguring an encoder port. */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 1;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;    /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* FIXME and these two registers should be documented. */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		cx_set(port->reg_ts_int_msk,  port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */

	/* Re-enable the A/V core clock now that the port is running. */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	return 0;
}
1404
cx23885_stop_dma(struct cx23885_tsport * port)1405 static int cx23885_stop_dma(struct cx23885_tsport *port)
1406 {
1407 struct cx23885_dev *dev = port->dev;
1408 u32 reg;
1409
1410 dprintk(1, "%s()\n", __func__);
1411
1412 /* Stop interrupts and DMA */
1413 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1414 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1415
1416 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1417
1418 reg = cx_read(PAD_CTRL);
1419
1420 /* Set TS1_OE */
1421 reg = reg | 0x1;
1422
1423 /* clear TS1_SOP_OE and TS1_OE_HI */
1424 reg = reg & ~0xa;
1425 cx_write(PAD_CTRL, reg);
1426 cx_write(port->reg_src_sel, 0);
1427 cx_write(port->reg_gen_ctrl, 8);
1428
1429 }
1430
1431 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1432 cx23885_av_clk(dev, 0);
1433
1434 return 0;
1435 }
1436
cx23885_restart_queue(struct cx23885_tsport * port,struct cx23885_dmaqueue * q)1437 int cx23885_restart_queue(struct cx23885_tsport *port,
1438 struct cx23885_dmaqueue *q)
1439 {
1440 struct cx23885_dev *dev = port->dev;
1441 struct cx23885_buffer *buf;
1442
1443 dprintk(5, "%s()\n", __func__);
1444 if (list_empty(&q->active)) {
1445 struct cx23885_buffer *prev;
1446 prev = NULL;
1447
1448 dprintk(5, "%s() queue is empty\n", __func__);
1449
1450 for (;;) {
1451 if (list_empty(&q->queued))
1452 return 0;
1453 buf = list_entry(q->queued.next, struct cx23885_buffer,
1454 vb.queue);
1455 if (NULL == prev) {
1456 list_del(&buf->vb.queue);
1457 list_add_tail(&buf->vb.queue, &q->active);
1458 cx23885_start_dma(port, q, buf);
1459 buf->vb.state = VIDEOBUF_ACTIVE;
1460 buf->count = q->count++;
1461 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
1462 dprintk(5, "[%p/%d] restart_queue - f/active\n",
1463 buf, buf->vb.i);
1464
1465 } else if (prev->vb.width == buf->vb.width &&
1466 prev->vb.height == buf->vb.height &&
1467 prev->fmt == buf->fmt) {
1468 list_del(&buf->vb.queue);
1469 list_add_tail(&buf->vb.queue, &q->active);
1470 buf->vb.state = VIDEOBUF_ACTIVE;
1471 buf->count = q->count++;
1472 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1473 /* 64 bit bits 63-32 */
1474 prev->risc.jmp[2] = cpu_to_le32(0);
1475 dprintk(5, "[%p/%d] restart_queue - m/active\n",
1476 buf, buf->vb.i);
1477 } else {
1478 return 0;
1479 }
1480 prev = buf;
1481 }
1482 return 0;
1483 }
1484
1485 buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
1486 dprintk(2, "restart_queue [%p/%d]: restart dma\n",
1487 buf, buf->vb.i);
1488 cx23885_start_dma(port, q, buf);
1489 list_for_each_entry(buf, &q->active, vb.queue)
1490 buf->count = q->count++;
1491 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
1492 return 0;
1493 }
1494
1495 /* ------------------------------------------------------------------ */
1496
cx23885_buf_prepare(struct videobuf_queue * q,struct cx23885_tsport * port,struct cx23885_buffer * buf,enum v4l2_field field)1497 int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
1498 struct cx23885_buffer *buf, enum v4l2_field field)
1499 {
1500 struct cx23885_dev *dev = port->dev;
1501 int size = port->ts_packet_size * port->ts_packet_count;
1502 int rc;
1503
1504 dprintk(1, "%s: %p\n", __func__, buf);
1505 if (0 != buf->vb.baddr && buf->vb.bsize < size)
1506 return -EINVAL;
1507
1508 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
1509 buf->vb.width = port->ts_packet_size;
1510 buf->vb.height = port->ts_packet_count;
1511 buf->vb.size = size;
1512 buf->vb.field = field /*V4L2_FIELD_TOP*/;
1513
1514 rc = videobuf_iolock(q, &buf->vb, NULL);
1515 if (0 != rc)
1516 goto fail;
1517 cx23885_risc_databuffer(dev->pci, &buf->risc,
1518 videobuf_to_dma(&buf->vb)->sglist,
1519 buf->vb.width, buf->vb.height);
1520 }
1521 buf->vb.state = VIDEOBUF_PREPARED;
1522 return 0;
1523
1524 fail:
1525 cx23885_free_buffer(q, buf);
1526 return rc;
1527 }
1528
/*
 * videobuf buf_queue hook for TS ports: append @buf to the active DMA
 * chain. The buffer's RISC program is terminated with a jump to the
 * stopper; when another buffer follows, the previous buffer's jump is
 * re-pointed at the new program so DMA flows straight through.
 * Caller must hold port->slock (videobuf queue lock).
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer    *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue  *cx88q = &port->mpegq;

	/* add jump to stopper */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	if (list_empty(&cx88q->active)) {
		/* Nothing running: start DMA on this buffer directly. */
		dprintk(1, "queue is empty - first active\n");
		list_add_tail(&buf->vb.queue, &cx88q->active);
		cx23885_start_dma(port, cx88q, buf);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count    = cx88q->count++;
		mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.i, __func__);
	} else {
		/* DMA already running: splice in by patching the previous
		 * buffer's jump to point at this buffer's program. */
		dprintk(1, "queue is not empty - append to active\n");
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  vb.queue);
		list_add_tail(&buf->vb.queue, &cx88q->active);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count    = cx88q->count++;
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.i, __func__);
	}
}
1562
1563 /* ----------------------------------------------------------- */
1564
do_cancel_buffers(struct cx23885_tsport * port,char * reason,int restart)1565 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
1566 int restart)
1567 {
1568 struct cx23885_dev *dev = port->dev;
1569 struct cx23885_dmaqueue *q = &port->mpegq;
1570 struct cx23885_buffer *buf;
1571 unsigned long flags;
1572
1573 spin_lock_irqsave(&port->slock, flags);
1574 while (!list_empty(&q->active)) {
1575 buf = list_entry(q->active.next, struct cx23885_buffer,
1576 vb.queue);
1577 list_del(&buf->vb.queue);
1578 buf->vb.state = VIDEOBUF_ERROR;
1579 wake_up(&buf->vb.done);
1580 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1581 buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
1582 }
1583 if (restart) {
1584 dprintk(1, "restarting queue\n");
1585 cx23885_restart_queue(port, q);
1586 }
1587 spin_unlock_irqrestore(&port->slock, flags);
1588 }
1589
/*
 * Cancel all pending I/O on @port: kill the timeout timer, stop DMA, and
 * error out every active buffer without restarting the queue.
 * Must be called from process context (del_timer_sync sleeps).
 */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;

	dprintk(1, "%s()\n", __func__);
	del_timer_sync(&q->timeout);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel", 0);
}
1600
/*
 * DMA timeout timer handler (runs in timer/softirq context). @data is
 * the cx23885_tsport pointer stashed when the timer was set up. Stops
 * DMA, errors out the stalled buffers and restarts the queue.
 */
static void cx23885_timeout(unsigned long data)
{
	struct cx23885_tsport *port = (struct cx23885_tsport *)data;
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);

	if (debug > 5)
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	cx23885_stop_dma(port);
	/* restart=1: re-arm the queue after flushing the stalled buffers */
	do_cancel_buffers(port, "timeout", 1);
}
1615
/*
 * Interrupt handler for the cx23417 encoder path (VID_B). Error bits
 * stop DMA and trigger diagnostics; RISCI1 completes a buffer, RISCI2
 * restarts the queue. Any non-zero status is acknowledged.
 * Returns 1 if the interrupt was handled, 0 otherwise.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	/* One combined test of all the error bits. */
	if (status & (VID_B_MSK_BAD_PKT | VID_B_MSK_OPC_ERR |
		      VID_B_MSK_VBI_OPC_ERR | VID_B_MSK_SYNC |
		      VID_B_MSK_VBI_SYNC | VID_B_MSK_OF |
		      VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	} else if (status & VID_B_MSK_RISCI2) {
		dprintk(7, "        VID_B_MSK_RISCI2\n");
		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);
	}

	/* Ack whatever we saw so the line can deassert. */
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1676
/*
 * Interrupt handler for a DVB transport-stream port (VID_B/VID_C).
 * Error bits stop DMA and dump the SRAM channel; RISCI1 completes a
 * buffer, RISCI2 restarts the queue. Any non-zero status is acked.
 * Returns 1 if the interrupt was handled, 0 otherwise.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 gpcnt;

	/* One combined test of all the error bits. */
	if (status & (VID_BC_MSK_OPC_ERR | VID_BC_MSK_BAD_PKT |
		      VID_BC_MSK_SYNC | VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		gpcnt = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, gpcnt);
		spin_unlock(&port->slock);

	} else if (status & VID_BC_MSK_RISCI2) {

		dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);

		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);

	}

	/* Ack whatever we saw so the line can deassert. */
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1735
/*
 * Top-level interrupt handler. Reads the bridge-wide and per-port status
 * registers, logs the active bits at high debug levels, then dispatches
 * to the CI, TS-port, video, IR and AV-core sub-handlers. The combined
 * PCI status is acked only if somebody claimed the interrupt.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	bool subdev_handled;

	/* Snapshot all status/mask registers up front. */
	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* Not our interrupt (shared line): bail out without acking. */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* Verbose per-bit decode of the bridge status, debug only. */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CAM/CI interrupts: type 1 = NetUP (two GPIO lines),
	 * type 2 = Altera-based CI (GPIO0 only). */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	/* TS port B: DVB or 417-encoder handler based on board config. */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	/* TS port C: same dispatch. */
	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	/* IR: forwarded to the IR v4l2 subdevice's ISR. */
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* AV core: disable the source and defer servicing to a workqueue;
	 * the work handler is responsible for re-enabling the interrupt. */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		if (!schedule_work(&dev->cx25840_work))
			printk(KERN_ERR "%s: failed to set up deferred work for"
			       " AV Core/IR interrupt. Interrupt is disabled"
			       " and won't be re-enabled\n", dev->name);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1885
cx23885_v4l2_dev_notify(struct v4l2_subdev * sd,unsigned int notification,void * arg)1886 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1887 unsigned int notification, void *arg)
1888 {
1889 struct cx23885_dev *dev;
1890
1891 if (sd == NULL)
1892 return;
1893
1894 dev = to_cx23885(sd->v4l2_dev);
1895
1896 switch (notification) {
1897 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1898 if (sd == dev->sd_ir)
1899 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1900 break;
1901 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1902 if (sd == dev->sd_ir)
1903 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1904 break;
1905 }
1906 }
1907
/*
 * Wire up the deferred-work handlers used by the interrupt path and
 * install the v4l2_device notify callback. Must run before interrupts
 * are requested.
 */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1915
/* True when the cx23417 MPEG encoder occupies TS port B on this board. */
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}
1920
/* True when the cx23417 MPEG encoder occupies TS port C on this board. */
static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
1925
/* Mask represents 32 different GPIOs. GPIOs are split across multiple
 * registers depending on the board configuration (and whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared, so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
/*
 * Set (drive high) the GPIO bits in @mask, routing bits 2..0 to the
 * bridge's GP0_IO register and bits 18..3 to the cx23417 host bus
 * (MC417_RWD). Bits 23..19 (cx25840 core) are not implemented.
 */
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x7)
		cx_set(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		/* Shared with the encoder host bus; warn but proceed. */
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Setting GPIO on encoder ports\n",
				dev->name);
		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}
1955
/*
 * Clear (drive low) the GPIO bits in @mask, routing bits 2..0 to the
 * bridge's GP0_IO register and bits 18..3 to the cx23417 host bus
 * (MC417_RWD). Bits 23..19 (cx25840 core) are not implemented.
 */
void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	u32 gp0_bits = mask & 0x00000007;
	u32 mc417_bits = mask & 0x0007fff8;

	if (gp0_bits)
		cx_clear(GP0_IO, gp0_bits);

	if (mc417_bits) {
		/* Shared with the encoder host bus; warn but proceed. */
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Clearing GPIO moving on encoder ports\n",
				dev->name);
		cx_clear(MC417_RWD, mc417_bits >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}
1973
/*
 * Read back the GPIO bits selected by @mask. Note the groups are
 * mutually exclusive per call: if any of bits 2..0 are requested, only
 * the GP0_IO group is returned and the MC417 group is never consulted.
 * Bits 23..19 (cx25840 core) are not implemented and read as 0.
 */
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		/* GP0 input levels live in bits 10..8 of GP0_IO. */
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		/* Shared with the encoder host bus; warn but proceed. */
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);

	return 0;
}
1993
/*
 * Configure the direction of the GPIO bits in @mask: output when
 * @asoutput is non-zero, input otherwise. Bits 2..0 are direction
 * bits 18..16 of GP0_IO; bits 18..3 go through MC417_OEN (active-low
 * for output). Bits 23..19 (cx25840 core) are not implemented.
 */
void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	u32 gp0_bits = mask & 0x00000007;
	u32 mc417_bits = mask & 0x0007fff8;

	if (gp0_bits) {
		if (asoutput)
			cx_set(GP0_IO, gp0_bits << 16);
		else
			cx_clear(GP0_IO, gp0_bits << 16);
	}

	if (mc417_bits) {
		/* Shared with the encoder host bus; warn but proceed. */
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Enabling GPIO on encoder ports\n",
				dev->name);
		/* MC417_OEN is active low for output, write 1 for an input */
		if (asoutput)
			cx_clear(MC417_OEN, mc417_bits >> 3);
		else
			cx_set(MC417_OEN, mc417_bits >> 3);
	}

	/* TODO: 23-19 */
}
2017
cx23885_initdev(struct pci_dev * pci_dev,const struct pci_device_id * pci_id)2018 static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
2019 const struct pci_device_id *pci_id)
2020 {
2021 struct cx23885_dev *dev;
2022 int err;
2023
2024 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2025 if (NULL == dev)
2026 return -ENOMEM;
2027
2028 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2029 if (err < 0)
2030 goto fail_free;
2031
2032 /* Prepare to handle notifications from subdevices */
2033 cx23885_v4l2_dev_notify_init(dev);
2034
2035 /* pci init */
2036 dev->pci = pci_dev;
2037 if (pci_enable_device(pci_dev)) {
2038 err = -EIO;
2039 goto fail_unreg;
2040 }
2041
2042 if (cx23885_dev_setup(dev) < 0) {
2043 err = -EINVAL;
2044 goto fail_unreg;
2045 }
2046
2047 /* print pci info */
2048 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
2049 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
2050 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
2051 "latency: %d, mmio: 0x%llx\n", dev->name,
2052 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2053 dev->pci_lat,
2054 (unsigned long long)pci_resource_start(pci_dev, 0));
2055
2056 pci_set_master(pci_dev);
2057 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
2058 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2059 err = -EIO;
2060 goto fail_irq;
2061 }
2062
2063 if (!pci_enable_msi(pci_dev))
2064 err = request_irq(pci_dev->irq, cx23885_irq,
2065 IRQF_DISABLED, dev->name, dev);
2066 else
2067 err = request_irq(pci_dev->irq, cx23885_irq,
2068 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
2069 if (err < 0) {
2070 printk(KERN_ERR "%s: can't get IRQ %d\n",
2071 dev->name, pci_dev->irq);
2072 goto fail_irq;
2073 }
2074
2075 switch (dev->board) {
2076 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2077 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2078 break;
2079 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2080 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2081 break;
2082 }
2083
2084 /*
2085 * The CX2388[58] IR controller can start firing interrupts when
2086 * enabled, so these have to take place after the cx23885_irq() handler
2087 * is hooked up by the call to request_irq() above.
2088 */
2089 cx23885_ir_pci_int_enable(dev);
2090 cx23885_input_init(dev);
2091
2092 return 0;
2093
2094 fail_irq:
2095 cx23885_dev_unregister(dev);
2096 fail_unreg:
2097 v4l2_device_unregister(&dev->v4l2_dev);
2098 fail_free:
2099 kfree(dev);
2100 return err;
2101 }
2102
/*
 * cx23885_finidev() - PCI remove callback.
 *
 * Tears down in roughly the reverse order of cx23885_initdev():
 * input/IR first (they can raise interrupts), then the bridge
 * hardware, then the IRQ/MSI resources, and finally the per-card
 * state.  The statement order here is deliberate; do not reorder.
 */
static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	/* quiesce the hardware before releasing anything */
	cx23885_shutdown(dev);

	pci_disable_device(pci_dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);
	pci_disable_msi(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2123
2124 static struct pci_device_id cx23885_pci_tbl[] = {
2125 {
2126 /* CX23885 */
2127 .vendor = 0x14f1,
2128 .device = 0x8852,
2129 .subvendor = PCI_ANY_ID,
2130 .subdevice = PCI_ANY_ID,
2131 }, {
2132 /* CX23887 Rev 2 */
2133 .vendor = 0x14f1,
2134 .device = 0x8880,
2135 .subvendor = PCI_ANY_ID,
2136 .subdevice = PCI_ANY_ID,
2137 }, {
2138 /* --- end of list --- */
2139 }
2140 };
2141 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2142
/* PCI driver hooks; power management is not implemented yet */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = __devexit_p(cx23885_finidev),
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};
2152
cx23885_init(void)2153 static int __init cx23885_init(void)
2154 {
2155 printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
2156 (CX23885_VERSION_CODE >> 16) & 0xff,
2157 (CX23885_VERSION_CODE >> 8) & 0xff,
2158 CX23885_VERSION_CODE & 0xff);
2159 #ifdef SNAPSHOT
2160 printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
2161 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
2162 #endif
2163 return pci_register_driver(&cx23885_pci_driver);
2164 }
2165
/* Module unload: detach the driver from the PCI core */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2170
/* Register the module entry and exit points with the kernel */
module_init(cx23885_init);
module_exit(cx23885_fini);
2173
2174 /* ----------------------------------------------------------- */
2175