/********************************************************************
 Filename:      via-ircc.c
 Version:       1.0
 Description:   Driver for the VIA VT8231/VT8233 IrDA chipsets
 Author:        VIA Technologies, Inc.
 Date:          08/06/2003

Copyright (c) 1998-2003 VIA Technologies, Inc.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

F01 Oct/02/02: Modify code for V0.11 (move out back-to-back transfer)
F02 Oct/28/02: Add SB device ID for 3147 and 3177.
Comment:
       jul/09/2002 : only two kinds of dongle are implemented so far.
       Oct/02/2002 : works on VT8231 and VT8233.
       Aug/06/2003 : changed driver format to a PCI driver.

2004-02-16: <sda@bdit.de>
- Removed unneeded 'legacy' pci stuff.
- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
- On speed change from core, don't send SIR frame with new speed.
  Use current speed and change speeds later.
- Make module-param dongle_id actually work.
- New dongle_id 17 (0x11): Temic TFDS4500. Single-ended SIR only.
  Tested with home-grown PCB on EPIA boards.
- Code cleanup.

********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <linux/pm.h>

#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>

#include "via-ircc.h"

#define VIA_MODULE_NAME "via-ircc"
#define CHIP_IO_EXTENT 0x40

static char *driver_name = VIA_MODULE_NAME;

/* Module parameters */
static int qos_mtt_bits = 0x07;	/* 1 ms or more */
static int dongle_id = 0;	/* default: probe */

/* We can't guess the type of connected dongle, user *must* supply it. */
module_param(dongle_id, int, 0);

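/* Example (the accepted dongle_id values follow the switch in
 * via_ircc_change_dongle_speed() below, e.g. 0x11 for a Temic TFDS4500):
 *
 *	modprobe via-ircc dongle_id=0x11
 */
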
/* Some prototypes */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
			 unsigned int id);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					  int iobase);
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
					   struct net_device *dev);
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
					   struct net_device *dev);
static void via_hw_init(struct via_ircc_cb *self);
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
static int via_ircc_is_receiving(struct via_ircc_cb *self);
static int via_ircc_read_dongle_id(int iobase);

static int via_ircc_net_open(struct net_device *dev);
static int via_ircc_net_close(struct net_device *dev);
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
			      int cmd);
static void via_ircc_change_dongle_speed(int iobase, int speed,
					  int dongle_id);
static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
static void hwreset(struct via_ircc_cb *self);
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
static int upload_rxdata(struct via_ircc_cb *self, int iobase);
static int __devinit via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
static void __devexit via_remove_one(struct pci_dev *pdev);

/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
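/* (Reads from port 0x80 are the classic ISA bus-settling trick; each inb()
 * costs roughly a microsecond, which is presumably why it serves as the
 * delay primitive here.) */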
static void iodelay(int udelay)
{
        u8 data;
        int i;

        for (i = 0; i < udelay; i++) {
                data = inb(0x80);
        }
}

static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
        { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
        { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
        { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
        { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
        { 0, }
};
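/* The trailing initializer in each entry is driver_data; it just indexes the
 * table entry and is not otherwise used by this driver (the probe routine
 * looks at id->device instead). */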

MODULE_DEVICE_TABLE(pci, via_pci_tbl);


static struct pci_driver via_driver = {
        .name           = VIA_MODULE_NAME,
        .id_table       = via_pci_tbl,
        .probe          = via_init_one,
        .remove         = __devexit_p(via_remove_one),
};


/*
 * Function via_ircc_init ()
 *
 *    Initialize chip. Just find out chip type and resource.
 */
static int __init via_ircc_init(void)
{
        int rc;

        IRDA_DEBUG(3, "%s()\n", __func__);

        rc = pci_register_driver(&via_driver);
        if (rc < 0) {
                IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
                           __func__, rc);
                return -ENODEV;
        }
        return 0;
}

static int __devinit via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
{
        int rc;
        u8 temp, oldPCI_40, oldPCI_44, bTmp, bTmp1;
        u16 Chipset, FirDRQ1, FirDRQ0, FirIRQ, FirIOBase;
        chipio_t info;

        IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);

        rc = pci_enable_device(pcidev);
        if (rc) {
                IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
                return -ENODEV;
        }

        /* Which south bridge is present? LPC reg 0x20 reads 0x3C on the
         * VT1211 (chipset 3076); anything else is treated as 3096. */
        if (ReadLPCReg(0x20) != 0x3C)
                Chipset = 0x3096;
        else
                Chipset = 0x3076;

        if (Chipset == 0x3076) {
                IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);

                WriteLPCReg(7, 0x0c);
                temp = ReadLPCReg(0x30);	//check if BIOS enabled FIR
                if ((temp & 0x01) == 1) {	// FIR enabled by BIOS
                        WriteLPCReg(0x1d, 0x82);
                        WriteLPCReg(0x23, 0x18);
                        temp = ReadLPCReg(0xF0);
                        if ((temp & 0x01) == 0) {
                                temp = (ReadLPCReg(0x74) & 0x03);	//DMA
                                FirDRQ0 = temp + 4;
                                temp = (ReadLPCReg(0x74) & 0x0C) >> 2;
                                FirDRQ1 = temp + 4;
                        } else {
                                temp = (ReadLPCReg(0x74) & 0x0C) >> 2;	//DMA
                                FirDRQ0 = temp + 4;
                                FirDRQ1 = FirDRQ0;
                        }
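                        /* (The 2-bit DRQ fields above select ISA DMA
                         * channels; adding 4 presumably maps them onto the
                         * 16-bit channels 4-7 - no datasheet at hand to
                         * confirm.) */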
                        FirIRQ = (ReadLPCReg(0x70) & 0x0f);	//IRQ
                        FirIOBase = ReadLPCReg(0x60) << 8;	//IO space: high byte
                        FirIOBase = FirIOBase | ReadLPCReg(0x61);	//low byte
                        info.fir_base = FirIOBase;
                        info.irq = FirIRQ;
                        info.dma = FirDRQ1;
                        info.dma2 = FirDRQ0;
                        pci_read_config_byte(pcidev, 0x40, &bTmp);
                        pci_write_config_byte(pcidev, 0x40, ((bTmp | 0x08) & 0xfe));
                        pci_read_config_byte(pcidev, 0x42, &bTmp);
                        pci_write_config_byte(pcidev, 0x42, (bTmp | 0xf0));
                        pci_write_config_byte(pcidev, 0x5a, 0xc0);
                        WriteLPCReg(0x28, 0x70);
                        if (via_ircc_open(pcidev, &info, 0x3076) == 0)
                                rc = 0;
                } else
                        rc = -ENODEV;	//IR not turned on
        } else {	//Not VT1211
                IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);

                pci_read_config_byte(pcidev, 0x67, &bTmp);	//check if BIOS enabled FIR
                if ((bTmp & 0x01) == 1) {	// BIOS enabled FIR
                        //Enable double DMA clock
                        pci_read_config_byte(pcidev, 0x42, &oldPCI_40);
                        pci_write_config_byte(pcidev, 0x42, oldPCI_40 | 0x80);
                        pci_read_config_byte(pcidev, 0x40, &oldPCI_40);
                        pci_write_config_byte(pcidev, 0x40, oldPCI_40 & 0xf7);
                        pci_read_config_byte(pcidev, 0x44, &oldPCI_44);
                        pci_write_config_byte(pcidev, 0x44, 0x4e);
                        //---------- read configuration from Function0 of south bridge
                        if ((bTmp & 0x02) == 0) {
                                pci_read_config_byte(pcidev, 0x44, &bTmp1);	//DMA
                                FirDRQ0 = (bTmp1 & 0x30) >> 4;
                                pci_read_config_byte(pcidev, 0x44, &bTmp1);
                                FirDRQ1 = (bTmp1 & 0xc0) >> 6;
                        } else {
                                pci_read_config_byte(pcidev, 0x44, &bTmp1);	//DMA
                                FirDRQ0 = (bTmp1 & 0x30) >> 4;
                                FirDRQ1 = 0;
                        }
                        pci_read_config_byte(pcidev, 0x47, &bTmp1);	//IRQ
                        FirIRQ = bTmp1 & 0x0f;

                        pci_read_config_byte(pcidev, 0x69, &bTmp);
                        FirIOBase = bTmp << 8;	//high byte
                        pci_read_config_byte(pcidev, 0x68, &bTmp);
                        FirIOBase = (FirIOBase | bTmp) & 0xfff0;
                        //-------------------------
                        info.fir_base = FirIOBase;
                        info.irq = FirIRQ;
                        info.dma = FirDRQ1;
                        info.dma2 = FirDRQ0;
                        if (via_ircc_open(pcidev, &info, 0x3096) == 0)
                                rc = 0;
                } else
                        rc = -ENODEV;	//IR not turned on
        }	//Not VT1211

        IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
        return rc;
}

static void __exit via_ircc_cleanup(void)
{
        IRDA_DEBUG(3, "%s()\n", __func__);

        /* Cleanup all instances of the driver */
        pci_unregister_driver(&via_driver);
}

static const struct net_device_ops via_ircc_sir_ops = {
        .ndo_start_xmit = via_ircc_hard_xmit_sir,
        .ndo_open = via_ircc_net_open,
        .ndo_stop = via_ircc_net_close,
        .ndo_do_ioctl = via_ircc_net_ioctl,
};

static const struct net_device_ops via_ircc_fir_ops = {
        .ndo_start_xmit = via_ircc_hard_xmit_fir,
        .ndo_open = via_ircc_net_open,
        .ndo_stop = via_ircc_net_close,
        .ndo_do_ioctl = via_ircc_net_ioctl,
};
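/* The two ops tables differ only in .ndo_start_xmit: dev->netdev_ops is
 * switched between them in via_ircc_change_speed(), so the SIR and FIR
 * transmit paths never need to test the current mode per packet. */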

/*
 * Function via_ircc_open(pdev, info, id)
 *
 *    Open driver instance
 *
 */
static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
                                   unsigned int id)
{
        struct net_device *dev;
        struct via_ircc_cb *self;
        int err;

        IRDA_DEBUG(3, "%s()\n", __func__);

        /* Allocate new instance of the driver */
        dev = alloc_irdadev(sizeof(struct via_ircc_cb));
        if (dev == NULL)
                return -ENOMEM;

        self = netdev_priv(dev);
        self->netdev = dev;
        spin_lock_init(&self->lock);

        pci_set_drvdata(pdev, self);

        /* Initialize Resource */
        self->io.cfg_base = info->cfg_base;
        self->io.fir_base = info->fir_base;
        self->io.irq = info->irq;
        self->io.fir_ext = CHIP_IO_EXTENT;
        self->io.dma = info->dma;
        self->io.dma2 = info->dma2;
        self->io.fifo_size = 32;
        self->chip_id = id;
        self->st_fifo.len = 0;
        self->RxDataReady = 0;

        /* Reserve the ioports that we need */
        if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
                IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
                           __func__, self->io.fir_base);
                err = -ENODEV;
                goto err_out1;
        }

        /* Initialize QoS for this device */
        irda_init_max_qos_capabilies(&self->qos);

        /* Check if user has supplied the dongle id or not */
        if (!dongle_id)
                dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
        self->io.dongle_id = dongle_id;

        /* The only value we must override is the baudrate. */
        /* Maximum speeds and capabilities are dongle-dependent. */
        switch (self->io.dongle_id) {
        case 0x0d:
                self->qos.baud_rate.bits =
                    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
                    IR_576000 | IR_1152000 | (IR_4000000 << 8);
                break;
        default:
                self->qos.baud_rate.bits =
                    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
                break;
        }
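        /* qos.baud_rate.bits is a 16-bit field: the low byte covers the
         * rates up to 1.152 Mb/s and the "<< 8" places 4 Mb/s FIR in the
         * high byte, per the encoding the irda core uses for IR_4000000. */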

        /* Following was used for testing:
         *
         *   self->qos.baud_rate.bits = IR_9600;
         *
         * It is no good, as it prohibits (error-prone) speed-changes.
         */

        self->qos.min_turn_time.bits = qos_mtt_bits;
        irda_qos_bits_to_value(&self->qos);

        /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
        self->rx_buff.truesize = 14384 + 2048;
        self->tx_buff.truesize = 14384 + 2048;
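        /* 14384 = (2048 + 6) * 7 + 6, i.e. a 2048-byte data size with a
         * window of 7; the extra 2048 is presumably slack for the DMA
         * engine. */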

        /* Allocate memory if needed */
        self->rx_buff.head =
                dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
                                   &self->rx_buff_dma, GFP_KERNEL);
        if (self->rx_buff.head == NULL) {
                err = -ENOMEM;
                goto err_out2;
        }
        memset(self->rx_buff.head, 0, self->rx_buff.truesize);

        self->tx_buff.head =
                dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
                                   &self->tx_buff_dma, GFP_KERNEL);
        if (self->tx_buff.head == NULL) {
                err = -ENOMEM;
                goto err_out3;
        }
        memset(self->tx_buff.head, 0, self->tx_buff.truesize);
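        /* (The explicit memset()s are kept because dma_alloc_coherent() was
         * not guaranteed to return zeroed memory on all architectures when
         * this was written.) */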

        self->rx_buff.in_frame = FALSE;
        self->rx_buff.state = OUTSIDE_FRAME;
        self->tx_buff.data = self->tx_buff.head;
        self->rx_buff.data = self->rx_buff.head;

        /* Reset Tx queue info */
        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;

        /* Override the network functions we need to use */
        dev->netdev_ops = &via_ircc_sir_ops;

        err = register_netdev(dev);
        if (err)
                goto err_out4;

        IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);

        /* Initialise the hardware.. */
        self->io.speed = 9600;
        via_hw_init(self);
        return 0;
err_out4:
        dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
                          self->tx_buff.head, self->tx_buff_dma);
err_out3:
        dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
                          self->rx_buff.head, self->rx_buff_dma);
err_out2:
        release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
        return err;
}

/*
 * Function via_remove_one(pdev)
 *
 *    Close driver instance
 *
 */
static void __devexit via_remove_one(struct pci_dev *pdev)
{
        struct via_ircc_cb *self = pci_get_drvdata(pdev);
        int iobase;

        IRDA_DEBUG(3, "%s()\n", __func__);

        iobase = self->io.fir_base;

        ResetChip(iobase, 5);	//hardware reset
        /* Remove netdevice */
        unregister_netdev(self->netdev);

        /* Release the PORT that this driver is using */
        IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
                   __func__, self->io.fir_base);
        release_region(self->io.fir_base, self->io.fir_ext);
        if (self->tx_buff.head)
                dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
                                  self->tx_buff.head, self->tx_buff_dma);
        if (self->rx_buff.head)
                dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
                                  self->rx_buff.head, self->rx_buff_dma);
        pci_set_drvdata(pdev, NULL);

        free_netdev(self->netdev);

        pci_disable_device(pdev);
}

/*
 * Function via_hw_init(self)
 *
 *    Set the chip up for SIR operation at 9600 baud.
 *
 *    Formerly via_ircc_setup
 */
static void via_hw_init(struct via_ircc_cb *self)
{
        int iobase = self->io.fir_base;

        IRDA_DEBUG(3, "%s()\n", __func__);

        SetMaxRxPacketSize(iobase, 0x0fff);	//set to max: 4095
        // FIFO Init
        EnRXFIFOReadyInt(iobase, OFF);
        EnRXFIFOHalfLevelInt(iobase, OFF);
        EnTXFIFOHalfLevelInt(iobase, OFF);
        EnTXFIFOUnderrunEOMInt(iobase, ON);
        EnTXFIFOReadyInt(iobase, OFF);
        InvertTX(iobase, OFF);
        InvertRX(iobase, OFF);

        if (ReadLPCReg(0x20) == 0x3c)
                WriteLPCReg(0xF0, 0);	// for VT1211
        /* Int Init */
        EnRXSpecInt(iobase, ON);

        /* The following is basically hwreset */
        /* If this is the case, why not just call hwreset() ? Jean II */
        ResetChip(iobase, 5);
        EnableDMA(iobase, OFF);
        EnableTX(iobase, OFF);
        EnableRX(iobase, OFF);
        EnRXDMA(iobase, OFF);
        EnTXDMA(iobase, OFF);
        RXStart(iobase, OFF);
        TXStart(iobase, OFF);
        InitCard(iobase);
        CommonInit(iobase);
        SIRFilter(iobase, ON);
        SetSIR(iobase, ON);
        CRC16(iobase, ON);
        EnTXCRC(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x00);
        SetBaudRate(iobase, 9600);
        SetPulseWidth(iobase, 12);
        SetSendPreambleCount(iobase, 0);

        self->io.speed = 9600;
        self->st_fifo.len = 0;

        via_ircc_change_dongle_speed(iobase, self->io.speed,
                                     self->io.dongle_id);

        WriteReg(iobase, I_ST_CT_0, 0x80);
}

/*
 * Function via_ircc_read_dongle_id (iobase)
 *
 *    Dongle probing is not supported; always returns the default id.
 */
static int via_ircc_read_dongle_id(int iobase)
{
        int dongle_id = 9;	/* Default to IBM */

        IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
        return dongle_id;
}

/*
 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *    Change the speed of the attached dongle.
 *    Only two dongle types are fully implemented at the moment.
 */
static void via_ircc_change_dongle_speed(int iobase, int speed,
                                         int dongle_id)
{
        u8 mode = 0;

        /* speed is unused, as we use IsSIROn()/IsMIROn() */
        speed = speed;

        IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
                   __func__, speed, iobase, dongle_id);

        switch (dongle_id) {

        /* Note: The dongle_id's listed here are derived from
         * nsc-ircc.c */

        case 0x08:		/* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
                UseOneRX(iobase, ON);	// use one RX pin RX1,RX2
                InvertTX(iobase, OFF);
                InvertRX(iobase, OFF);

                EnRX2(iobase, ON);	//sir to rx2
                EnGPIOtoRX2(iobase, OFF);

                if (IsSIROn(iobase)) {	//sir
                        // Mode select Off
                        SlowIRRXLowActive(iobase, ON);
                        udelay(1000);
                        SlowIRRXLowActive(iobase, OFF);
                } else {
                        if (IsMIROn(iobase)) {	//mir
                                // Mode select On
                                SlowIRRXLowActive(iobase, OFF);
                                udelay(20);
                        } else {	// fir
                                if (IsFIROn(iobase)) {	//fir
                                        // Mode select On
                                        SlowIRRXLowActive(iobase, OFF);
                                        udelay(20);
                                }
                        }
                }
                break;

        case 0x09:		/* IBM31T1100 or Temic TFDS6000/TFDS6500 */
                UseOneRX(iobase, ON);	//use ONE RX....RX1
                InvertTX(iobase, OFF);
                InvertRX(iobase, OFF);	// do not invert RX pin

                EnRX2(iobase, ON);
                EnGPIOtoRX2(iobase, OFF);
                if (IsSIROn(iobase)) {	//sir
                        // Mode select On
                        SlowIRRXLowActive(iobase, ON);
                        udelay(20);
                        // Mode select Off
                        SlowIRRXLowActive(iobase, OFF);
                }
                if (IsMIROn(iobase)) {	//mir
                        // Mode select On
                        SlowIRRXLowActive(iobase, OFF);
                        udelay(20);
                        // Mode select Off
                        SlowIRRXLowActive(iobase, ON);
                } else {	// fir
                        if (IsFIROn(iobase)) {	//fir
                                // Mode select On
                                SlowIRRXLowActive(iobase, OFF);
                                // TX On
                                WriteTX(iobase, ON);
                                udelay(20);
                                // Mode select OFF
                                SlowIRRXLowActive(iobase, ON);
                                udelay(20);
                                // TX Off
                                WriteTX(iobase, OFF);
                        }
                }
                break;

        case 0x0d:
                UseOneRX(iobase, OFF);	// use two RX pins RX1,RX2
                InvertTX(iobase, OFF);
                InvertRX(iobase, OFF);
                SlowIRRXLowActive(iobase, OFF);
                if (IsSIROn(iobase)) {	//sir
                        EnGPIOtoRX2(iobase, OFF);
                        WriteGIO(iobase, OFF);
                        EnRX2(iobase, OFF);	//sir to rx2
                } else {	// fir mir
                        EnGPIOtoRX2(iobase, OFF);
                        WriteGIO(iobase, OFF);
                        EnRX2(iobase, OFF);	//fir to rx
                }
                break;

        case 0x11:		/* Temic TFDS4500 */

                IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);

                UseOneRX(iobase, ON);	//use ONE RX....RX1
                InvertTX(iobase, OFF);
                InvertRX(iobase, ON);	// invert RX pin

                EnRX2(iobase, ON);	//sir to rx2
                EnGPIOtoRX2(iobase, OFF);

                if (IsSIROn(iobase)) {	//sir
                        // Mode select On
                        SlowIRRXLowActive(iobase, ON);
                        udelay(20);
                        // Mode select Off
                        SlowIRRXLowActive(iobase, OFF);
                } else {
                        IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
                }
                break;

        case 0x0ff:		/* Vishay */
                if (IsSIROn(iobase))
                        mode = 0;
                else if (IsMIROn(iobase))
                        mode = 1;
                else if (IsFIROn(iobase))
                        mode = 2;
                else if (IsVFIROn(iobase))
                        mode = 5;	//VFIR-16
                SI_SetMode(iobase, mode);
                break;

        default:
                IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
                           __func__, dongle_id);
        }
}

/*
 * Function via_ircc_change_speed (self, baud)
 *
 *    Change the speed of the device
 *
 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
        struct net_device *dev = self->netdev;
        u16 iobase;
        u8 value = 0, bTmp;

        iobase = self->io.fir_base;
        /* Update accounting for new speed */
        self->io.speed = speed;
        IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

        WriteReg(iobase, I_ST_CT_0, 0x0);

        /* Controller mode selection */
        switch (speed) {
        case 2400:
        case 9600:
        case 19200:
        case 38400:
        case 57600:
        case 115200:
                value = (115200 / speed) - 1;
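                /* The SIR divisor is derived from the 115200 bps maximum,
                 * e.g. 115200/9600 - 1 = 11; it is written into bits 2-7 of
                 * register 0x19 below. */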
                SetSIR(iobase, ON);
                CRC16(iobase, ON);
                break;
        case 576000:
                /* FIXME: this can't be right, as it's the same as 115200,
                 * and 576000 is MIR, not SIR. */
                value = 0;
                SetSIR(iobase, ON);
                CRC16(iobase, ON);
                break;
        case 1152000:
                value = 0;
                SetMIR(iobase, ON);
                /* FIXME: CRC ??? */
                break;
        case 4000000:
                value = 0;
                SetFIR(iobase, ON);
                SetPulseWidth(iobase, 0);
                SetSendPreambleCount(iobase, 14);
                CRC16(iobase, OFF);
                EnTXCRC(iobase, ON);
                break;
        case 16000000:
                value = 0;
                SetVFIR(iobase, ON);
                /* FIXME: CRC ??? */
                break;
        default:
                value = 0;
                break;
        }

        /* Set baudrate to 0x19[2..7] */
        bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
        bTmp |= value << 2;
        WriteReg(iobase, I_CF_H_1, bTmp);

        /* Some dongles may need to be informed about speed changes. */
        via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

        /* Set FIFO size to 64 */
        SetFIFO(iobase, 64);

        /* Enable IR */
        WriteReg(iobase, I_ST_CT_0, 0x80);

        // EnTXFIFOHalfLevelInt(iobase,ON);

        /* Enable some interrupts so we can receive frames */
        //EnAllInt(iobase,ON);

        if (IsSIROn(iobase)) {
                SIRFilter(iobase, ON);
                SIRRecvAny(iobase, ON);
        } else {
                SIRFilter(iobase, OFF);
                SIRRecvAny(iobase, OFF);
        }

        if (speed > 115200) {
                /* Install FIR xmit handler */
                dev->netdev_ops = &via_ircc_fir_ops;
                via_ircc_dma_receive(self);
        } else {
                /* Install SIR xmit handler */
                dev->netdev_ops = &via_ircc_sir_ops;
        }
        netif_wake_queue(dev);
}

/*
 * Function via_ircc_hard_xmit (skb, dev)
 *
 *    Transmit the frame!
 *
 */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
                                          struct net_device *dev)
{
        struct via_ircc_cb *self;
        unsigned long flags;
        u16 iobase;
        __u32 speed;

        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
        iobase = self->io.fir_base;

        netif_stop_queue(dev);
        /* Check if we need to change the speed */
        speed = irda_get_next_speed(skb);
        if ((speed != self->io.speed) && (speed != -1)) {
                /* Check for empty frame */
                if (!skb->len) {
                        via_ircc_change_speed(self, speed);
                        dev->trans_start = jiffies;
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                } else
                        self->new_speed = speed;
        }
        InitCard(iobase);
        CommonInit(iobase);
        SIRFilter(iobase, ON);
        SetSIR(iobase, ON);
        CRC16(iobase, ON);
        EnTXCRC(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x00);

        spin_lock_irqsave(&self->lock, flags);
        self->tx_buff.data = self->tx_buff.head;
        self->tx_buff.len =
            async_wrap_skb(skb, self->tx_buff.data,
                           self->tx_buff.truesize);
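        /* async_wrap_skb() byte-stuffs the frame into SIR async format
         * (BOF/EOF flags plus transparency escaping) in tx_buff and returns
         * the wrapped length, so the chip can DMA it out verbatim. */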

        dev->stats.tx_bytes += self->tx_buff.len;
        /* Send this frame with old speed */
        SetBaudRate(iobase, self->io.speed);
        SetPulseWidth(iobase, 12);
        SetSendPreambleCount(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x80);

        EnableTX(iobase, ON);
        EnableRX(iobase, OFF);

        ResetChip(iobase, 0);
        ResetChip(iobase, 1);
        ResetChip(iobase, 2);
        ResetChip(iobase, 3);
        ResetChip(iobase, 4);

        EnAllInt(iobase, ON);
        EnTXDMA(iobase, ON);
        EnRXDMA(iobase, OFF);

        irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
                       DMA_TX_MODE);

        SetSendByte(iobase, self->tx_buff.len);
        RXStart(iobase, OFF);
        TXStart(iobase, ON);

        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&self->lock, flags);
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
                                          struct net_device *dev)
{
        struct via_ircc_cb *self;
        u16 iobase;
        __u32 speed;
        unsigned long flags;

        self = netdev_priv(dev);
        iobase = self->io.fir_base;

        if (self->st_fifo.len)
                return NETDEV_TX_OK;
        if (self->chip_id == 0x3076)
                iodelay(1500);
        else
                udelay(1500);
        netif_stop_queue(dev);
        speed = irda_get_next_speed(skb);
        if ((speed != self->io.speed) && (speed != -1)) {
                if (!skb->len) {
                        via_ircc_change_speed(self, speed);
                        dev->trans_start = jiffies;
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                } else
                        self->new_speed = speed;
        }
        spin_lock_irqsave(&self->lock, flags);
        self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
        self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

        self->tx_fifo.tail += skb->len;
        dev->stats.tx_bytes += skb->len;
        skb_copy_from_linear_data(skb,
                                  self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
        self->tx_fifo.len++;
        self->tx_fifo.free++;
//F01   if (self->tx_fifo.len == 1) {
        via_ircc_dma_xmit(self, iobase);
//F01   }
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW - 1)) netif_wake_queue(self->netdev);
        dev->trans_start = jiffies;
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&self->lock, flags);
        return NETDEV_TX_OK;
}

static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
        EnTXDMA(iobase, OFF);
        self->io.direction = IO_XMIT;
        EnPhys(iobase, ON);
        EnableTX(iobase, ON);
        EnableRX(iobase, OFF);
        ResetChip(iobase, 0);
        ResetChip(iobase, 1);
        ResetChip(iobase, 2);
        ResetChip(iobase, 3);
        ResetChip(iobase, 4);
        EnAllInt(iobase, ON);
        EnTXDMA(iobase, ON);
        EnRXDMA(iobase, OFF);
        irda_setup_dma(self->io.dma,
                       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
                        self->tx_buff.head) + self->tx_buff_dma,
                       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
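        /* The bus address handed to the DMA engine is this queue entry's
         * byte offset within tx_buff added to the coherent handle
         * tx_buff_dma, so no per-frame mapping is needed. */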
        IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
                   __func__, self->tx_fifo.ptr,
                   self->tx_fifo.queue[self->tx_fifo.ptr].len,
                   self->tx_fifo.len);

        SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
        RXStart(iobase, OFF);
        TXStart(iobase, ON);
        return 0;
}

/*
 * Function via_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. This function will only be called
 *    by the interrupt handler
 *
 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
        int iobase;
        int ret = TRUE;
        u8 Tx_status;

        IRDA_DEBUG(3, "%s()\n", __func__);

        iobase = self->io.fir_base;
        /* Disable DMA */
//      DisableDmaChannel(self->io.dma);
        /* Check for underrun! */
        /* Clear bit, by writing 1 into it */
        Tx_status = GetTXStatus(iobase);
        if (Tx_status & 0x08) {
                self->netdev->stats.tx_errors++;
                self->netdev->stats.tx_fifo_errors++;
                hwreset(self);
                /* how to clear underrun? */
        } else {
                self->netdev->stats.tx_packets++;
                ResetChip(iobase, 3);
                ResetChip(iobase, 4);
        }
        /* Check if we need to change the speed */
        if (self->new_speed) {
                via_ircc_change_speed(self, self->new_speed);
                self->new_speed = 0;
        }

        /* Finished with this frame, so prepare for next */
        if (IsFIROn(iobase)) {
                if (self->tx_fifo.len) {
                        self->tx_fifo.len--;
                        self->tx_fifo.ptr++;
                }
        }
        IRDA_DEBUG(1,
                   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
                   __func__,
                   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
        // Any frames to be sent back-to-back?
        if (self->tx_fifo.len) {
                // Not finished yet!
                via_ircc_dma_xmit(self, iobase);
                ret = FALSE;
        } else {
F01_E */
        // Reset Tx FIFO info
        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;
//F01   }

        // Make sure we have room for more frames
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW - 1)) {
        // Not busy transmitting anymore
        // Tell the network layer that we can accept more frames
        netif_wake_queue(self->netdev);
//F01   }
        return ret;
}

/*
 * Function via_ircc_dma_receive (self)
 *
 *    Set the configuration for receiving a frame.
 *
 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
        int iobase;

        iobase = self->io.fir_base;

        IRDA_DEBUG(3, "%s()\n", __func__);

        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;
        self->RxDataReady = 0;
        self->io.direction = IO_RECV;
        self->rx_buff.data = self->rx_buff.head;
        self->st_fifo.len = self->st_fifo.pending_bytes = 0;
        self->st_fifo.tail = self->st_fifo.head = 0;

        EnPhys(iobase, ON);
        EnableTX(iobase, OFF);
        EnableRX(iobase, ON);

        ResetChip(iobase, 0);
        ResetChip(iobase, 1);
        ResetChip(iobase, 2);
        ResetChip(iobase, 3);
        ResetChip(iobase, 4);

        EnAllInt(iobase, ON);
        EnTXDMA(iobase, OFF);
        EnRXDMA(iobase, ON);
        irda_setup_dma(self->io.dma2, self->rx_buff_dma,
                       self->rx_buff.truesize, DMA_RX_MODE);
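        /* Note that RX runs on the second channel (io.dma2) while TX in
         * via_ircc_dma_xmit() uses io.dma; on setups exposing a single DRQ
         * the probe code sets both to the same channel. */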
        TXStart(iobase, OFF);
        RXStart(iobase, ON);

        return 0;
}

/*
 * Function via_ircc_dma_receive_complete (self)
 *
 *    The controller has finished receiving frames; this routine is called
 *    by the ISR
 *
 */
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
                                         int iobase)
{
        struct st_fifo *st_fifo;
        struct sk_buff *skb;
        int len, i;
        u8 status = 0;

        iobase = self->io.fir_base;
        st_fifo = &self->st_fifo;

        if (self->io.speed < 4000000) {	//Speed below FIR
                len = GetRecvByte(iobase, self);
                skb = dev_alloc_skb(len + 1);
                if (skb == NULL)
                        return FALSE;
                // Make sure IP header gets aligned
                skb_reserve(skb, 1);
                skb_put(skb, len - 2);
                if (self->chip_id == 0x3076) {
                        for (i = 0; i < len - 2; i++)
                                skb->data[i] = self->rx_buff.data[i * 2];
                } else {
                        if (self->chip_id == 0x3096) {
                                for (i = 0; i < len - 2; i++)
                                        skb->data[i] =
                                            self->rx_buff.data[i];
                        }
                }
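                /* (On the 0x3076 chip the received bytes apparently land on
                 * every other byte of the DMA buffer, hence the stride of
                 * two; the 0x3096 buffer is packed.) */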
                // Move to next frame
                self->rx_buff.data += len;
                self->netdev->stats.rx_bytes += len;
                self->netdev->stats.rx_packets++;
                skb->dev = self->netdev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                netif_rx(skb);
                return TRUE;
        }

        else {			//FIR mode
                len = GetRecvByte(iobase, self);
                if (len == 0)
                        return TRUE;	//interrupt only; data may have been moved by RxT
                if (((len - 4) < 2) || ((len - 4) > 2048)) {
                        IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
                                   __func__, len, RxCurCount(iobase, self),
                                   self->RxLastCount);
                        hwreset(self);
                        return FALSE;
                }
                IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
                           __func__,
                           st_fifo->len, len - 4, RxCurCount(iobase, self));

                st_fifo->entries[st_fifo->tail].status = status;
                st_fifo->entries[st_fifo->tail].len = len;
                st_fifo->pending_bytes += len;
                st_fifo->tail++;
                st_fifo->len++;
                if (st_fifo->tail > MAX_RX_WINDOW)
                        st_fifo->tail = 0;
                self->RxDataReady = 0;

                // Up to MAX_RX_WINDOW frames may have been received by
                // receive_complete before the Timer IRQ fires
/* F01_S
                if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
                        RXStart(iobase,ON);
                        SetTimer(iobase,4);
                }
                else {
F01_E */
                EnableRX(iobase, OFF);
                EnRXDMA(iobase, OFF);
                RXStart(iobase, OFF);
//F01_S
                // Pop this entry from the fifo
                if (st_fifo->head > MAX_RX_WINDOW)
                        st_fifo->head = 0;
                status = st_fifo->entries[st_fifo->head].status;
                len = st_fifo->entries[st_fifo->head].len;
                st_fifo->head++;
                st_fifo->len--;

                skb = dev_alloc_skb(len + 1 - 4);
                /*
                 * if frame size, data ptr, or skb ptr are wrong, then get next
                 * entry.
                 */
                if ((skb == NULL) || (skb->data == NULL) ||
                    (self->rx_buff.data == NULL) || (len < 6)) {
                        self->netdev->stats.rx_dropped++;
                        kfree_skb(skb);
                        return TRUE;
                }
                skb_reserve(skb, 1);
                skb_put(skb, len - 4);

                skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
                IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
                           len - 4, self->rx_buff.data);

                // Move to next frame
                self->rx_buff.data += len;
                self->netdev->stats.rx_bytes += len;
                self->netdev->stats.rx_packets++;
                skb->dev = self->netdev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                netif_rx(skb);

//F01_E
        }			//FIR
        return TRUE;
}

/*
 * If a frame has been received but no interrupt was raised, this routine
 * uploads the frame.
 */
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
        struct sk_buff *skb;
        int len;
        struct st_fifo *st_fifo;
        st_fifo = &self->st_fifo;

        len = GetRecvByte(iobase, self);

        IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

        if ((len - 4) < 2) {
                self->netdev->stats.rx_dropped++;
                return FALSE;
        }

        skb = dev_alloc_skb(len + 1);
        if (skb == NULL) {
                self->netdev->stats.rx_dropped++;
                return FALSE;
        }
        skb_reserve(skb, 1);
        skb_put(skb, len - 4 + 1);
        skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
        st_fifo->tail++;
        st_fifo->len++;
        if (st_fifo->tail > MAX_RX_WINDOW)
                st_fifo->tail = 0;
        // Move to next frame
        self->rx_buff.data += len;
        self->netdev->stats.rx_bytes += len;
        self->netdev->stats.rx_packets++;
        skb->dev = self->netdev;
        skb_reset_mac_header(skb);
        skb->protocol = htons(ETH_P_IRDA);
        netif_rx(skb);
        if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
                RXStart(iobase, ON);
        } else {
                EnableRX(iobase, OFF);
                EnRXDMA(iobase, OFF);
                RXStart(iobase, OFF);
        }
        return TRUE;
}

/*
 * Implements back-to-back receive; called from the timer interrupt to
 * upload the queued frames.
 */

static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
        struct st_fifo *st_fifo;
        struct sk_buff *skb;
        int len;
        u8 status;

        st_fifo = &self->st_fifo;

        if (CkRxRecv(iobase, self)) {
                // still receiving: return without uploading the frame
                self->RetryCount = 0;
                SetTimer(iobase, 20);
                self->RxDataReady++;
                return FALSE;
        } else
                self->RetryCount++;

        if ((self->RetryCount >= 1) ||
            ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
            (st_fifo->len >= (MAX_RX_WINDOW))) {
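                /* Upload once the receiver has been idle for a tick, the
                 * DMA buffer is within 2 KB of full, or the status fifo
                 * holds a full window of frames. */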
                while (st_fifo->len > 0) {	//upload frame
                        // Pop this entry from the fifo
                        if (st_fifo->head > MAX_RX_WINDOW)
                                st_fifo->head = 0;
                        status = st_fifo->entries[st_fifo->head].status;
                        len = st_fifo->entries[st_fifo->head].len;
                        st_fifo->head++;
                        st_fifo->len--;

                        skb = dev_alloc_skb(len + 1 - 4);
                        /*
                         * if frame size, data ptr, or skb ptr are wrong,
                         * then get next entry.
                         */
                        if ((skb == NULL) || (skb->data == NULL) ||
                            (self->rx_buff.data == NULL) || (len < 6)) {
                                self->netdev->stats.rx_dropped++;
                                continue;
                        }
                        skb_reserve(skb, 1);
                        skb_put(skb, len - 4);
                        skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

                        IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
                                   len - 4, st_fifo->head);

                        // Move to next frame
                        self->rx_buff.data += len;
                        self->netdev->stats.rx_bytes += len;
                        self->netdev->stats.rx_packets++;
                        skb->dev = self->netdev;
                        skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                }		//while
                self->RetryCount = 0;

                IRDA_DEBUG(2,
                           "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
                           __func__,
                           GetHostStatus(iobase), GetRXStatus(iobase));

                /*
                 * if a frame finished receiving in this routine, then
                 * upload it too.
                 */
                if ((GetRXStatus(iobase) & 0x10) &&
                    (RxCurCount(iobase, self) != self->RxLastCount)) {
                        upload_rxdata(self, iobase);
                        if (irda_device_txqueue_empty(self->netdev))
                                via_ircc_dma_receive(self);
                }
        }	// timer detected completion
        else
                SetTimer(iobase, 4);
        return TRUE;
}



/*
 * Function via_ircc_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct via_ircc_cb *self = netdev_priv(dev);
        int iobase;
        u8 iHostIntType, iRxIntType, iTxIntType;

        iobase = self->io.fir_base;
        spin_lock(&self->lock);
        iHostIntType = GetHostStatus(iobase);

        IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
                   __func__, iHostIntType,
                   (iHostIntType & 0x40) ? "Timer" : "",
                   (iHostIntType & 0x20) ? "Tx" : "",
                   (iHostIntType & 0x10) ? "Rx" : "",
                   (iHostIntType & 0x0e) >> 1);

        if ((iHostIntType & 0x40) != 0) {	//Timer Event
                self->EventFlag.TimeOut++;
                ClearTimerInt(iobase, 1);
                if (self->io.direction == IO_XMIT) {
                        via_ircc_dma_xmit(self, iobase);
                }
                if (self->io.direction == IO_RECV) {
                        /*
                         * a frame has been held ready too long; must reset.
                         */
                        if (self->RxDataReady > 30) {
                                hwreset(self);
                                if (irda_device_txqueue_empty(self->netdev)) {
                                        via_ircc_dma_receive(self);
                                }
                        } else {	// call this to upload the frame
                                RxTimerHandler(self, iobase);
                        }
                }	//RECV
        }	//Timer Event
        if ((iHostIntType & 0x20) != 0) {	//Tx Event
                iTxIntType = GetTXStatus(iobase);

                IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
                           __func__, iTxIntType,
                           (iTxIntType & 0x08) ? "FIFO underr." : "",
                           (iTxIntType & 0x04) ? "EOM" : "",
                           (iTxIntType & 0x02) ? "FIFO ready" : "",
                           (iTxIntType & 0x01) ? "Early EOM" : "");

                if (iTxIntType & 0x4) {
                        self->EventFlag.EOMessage++;	// read and will auto clean
                        if (via_ircc_dma_xmit_complete(self)) {
                                if (irda_device_txqueue_empty(self->netdev)) {
                                        via_ircc_dma_receive(self);
                                }
                        } else {
                                self->EventFlag.Unknown++;
                        }
                }	//EOP
        }	//Tx Event
        //----------------------------------------
        if ((iHostIntType & 0x10) != 0) {	//Rx Event
                /* Check if DMA has finished */
                iRxIntType = GetRXStatus(iobase);

                IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
                           __func__, iRxIntType,
                           (iRxIntType & 0x80) ? "PHY err." : "",
                           (iRxIntType & 0x40) ? "CRC err" : "",
                           (iRxIntType & 0x20) ? "FIFO overr." : "",
                           (iRxIntType & 0x10) ? "EOF" : "",
                           (iRxIntType & 0x08) ? "RxData" : "",
                           (iRxIntType & 0x02) ? "RxMaxLen" : "",
                           (iRxIntType & 0x01) ? "SIR bad" : "");
                if (!iRxIntType)
                        IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

                if (iRxIntType & 0x10) {
                        if (via_ircc_dma_receive_complete(self, iobase)) {
//F01                           if(!(IsFIROn(iobase))) via_ircc_dma_receive(self);
                                via_ircc_dma_receive(self);
                        }
                }	// No ERR
                else {	//ERR
                        IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
                                   __func__, iRxIntType, iHostIntType,
                                   RxCurCount(iobase, self),
                                   self->RxLastCount);

                        if (iRxIntType & 0x20) {	//FIFO OverRun ERR
                                ResetChip(iobase, 0);
                                ResetChip(iobase, 1);
                        } else {	//PHY, CRC ERR
                                if (iRxIntType != 0x08)
                                        hwreset(self);	//F01
                        }
                        via_ircc_dma_receive(self);
                }	//ERR
        }	//Rx Event
        spin_unlock(&self->lock);
        return IRQ_RETVAL(iHostIntType);
}

static void hwreset(struct via_ircc_cb *self)
{
        int iobase;
        iobase = self->io.fir_base;

        IRDA_DEBUG(3, "%s()\n", __func__);

        ResetChip(iobase, 5);
        EnableDMA(iobase, OFF);
        EnableTX(iobase, OFF);
        EnableRX(iobase, OFF);
        EnRXDMA(iobase, OFF);
        EnTXDMA(iobase, OFF);
        RXStart(iobase, OFF);
        TXStart(iobase, OFF);
        InitCard(iobase);
        CommonInit(iobase);
        SIRFilter(iobase, ON);
        SetSIR(iobase, ON);
        CRC16(iobase, ON);
        EnTXCRC(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x00);
        SetBaudRate(iobase, 9600);
        SetPulseWidth(iobase, 12);
        SetSendPreambleCount(iobase, 0);
        WriteReg(iobase, I_ST_CT_0, 0x80);

        /* Restore speed. */
        via_ircc_change_speed(self, self->io.speed);

        self->st_fifo.len = 0;
}

/*
 * Function via_ircc_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int via_ircc_is_receiving(struct via_ircc_cb *self)
{
        int status = FALSE;
        int iobase;

        IRDA_ASSERT(self != NULL, return FALSE;);

        iobase = self->io.fir_base;
        if (CkRxRecv(iobase, self))
                status = TRUE;

        IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);

        return status;
}


/*
 * Function via_ircc_net_open (dev)
 *
 *    Start the device
 *
 */
static int via_ircc_net_open(struct net_device *dev)
{
        struct via_ircc_cb *self;
        int iobase;
        char hwname[32];

        IRDA_DEBUG(3, "%s()\n", __func__);

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        dev->stats.rx_packets = 0;
        IRDA_ASSERT(self != NULL, return 0;);
        iobase = self->io.fir_base;
        if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
                IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
                             self->io.irq);
                return -EAGAIN;
        }
        /*
         * Always allocate the DMA channel after the IRQ, and clean up on
         * failure.
         */
        if (request_dma(self->io.dma, dev->name)) {
                IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
                             self->io.dma);
                free_irq(self->io.irq, dev);
                return -EAGAIN;
        }
        if (self->io.dma2 != self->io.dma) {
                if (request_dma(self->io.dma2, dev->name)) {
                        IRDA_WARNING("%s, unable to allocate dma2=%d\n",
                                     driver_name, self->io.dma2);
                        free_irq(self->io.irq, dev);
                        free_dma(self->io.dma);
                        return -EAGAIN;
                }
        }


        /* turn on interrupts */
        EnAllInt(iobase, ON);
        EnInternalLoop(iobase, OFF);
        EnExternalLoop(iobase, OFF);

        /* Arm the receiver */
        via_ircc_dma_receive(self);

        /* Ready to play! */
        netif_start_queue(dev);

        /*
         * Open new IrLAP layer instance, now that everything should be
         * initialized properly
         */
        sprintf(hwname, "VIA @ 0x%x", iobase);
        self->irlap = irlap_open(dev, &self->qos, hwname);

        self->RxLastCount = 0;

        return 0;
}

/*
 * Function via_ircc_net_close (dev)
 *
 *    Stop the device
 *
 */
static int via_ircc_net_close(struct net_device *dev)
{
        struct via_ircc_cb *self;
        int iobase;

        IRDA_DEBUG(3, "%s()\n", __func__);

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return 0;);

        /* Stop device */
        netif_stop_queue(dev);
        /* Stop and remove instance of IrLAP */
        if (self->irlap)
                irlap_close(self->irlap);
        self->irlap = NULL;
        iobase = self->io.fir_base;
        EnTXDMA(iobase, OFF);
        EnRXDMA(iobase, OFF);
        DisableDmaChannel(self->io.dma);

        /* Disable interrupts */
        EnAllInt(iobase, OFF);
        free_irq(self->io.irq, dev);
        free_dma(self->io.dma);
        if (self->io.dma2 != self->io.dma)
                free_dma(self->io.dma2);

        return 0;
}

/*
 * Function via_ircc_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
                              int cmd)
{
        struct if_irda_req *irq = (struct if_irda_req *) rq;
        struct via_ircc_cb *self;
        unsigned long flags;
        int ret = 0;

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);
        IRDA_ASSERT(self != NULL, return -1;);
        IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
                   cmd);
        /* Disable interrupts & save flags */
        spin_lock_irqsave(&self->lock, flags);
        switch (cmd) {
        case SIOCSBANDWIDTH:	/* Set bandwidth */
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                via_ircc_change_speed(self, irq->ifr_baudrate);
                break;
        case SIOCSMEDIABUSY:	/* Set media busy */
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                irda_device_set_media_busy(self->netdev, TRUE);
                break;
        case SIOCGRECEIVING:	/* Check if we are receiving right now */
                irq->ifr_receiving = via_ircc_is_receiving(self);
                break;
        default:
                ret = -EOPNOTSUPP;
        }
out:
        spin_unlock_irqrestore(&self->lock, flags);
        return ret;
}

MODULE_AUTHOR("VIA Technologies, Inc.");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

module_init(via_ircc_init);
module_exit(via_ircc_cleanup);