Lines matching refs: he_dev (drivers/atm/he.c, ForeRunner HE ATM adapter driver)

118 #define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
122 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
143 static void he_stop(struct he_dev *dev);
147 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
151 struct he_dev *he_devs = NULL;
178 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr, in he_writel_internal() argument
181 he_writel(he_dev, val, CON_DAT); in he_writel_internal()
182 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */ in he_writel_internal()
183 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL); in he_writel_internal()
184 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); in he_writel_internal()
197 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags) in he_readl_internal() argument
199 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL); in he_readl_internal()
200 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); in he_readl_internal()
201 return he_readl(he_dev, CON_DAT); in he_readl_internal()
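
The he_writel_internal()/he_readl_internal() matches above show the driver's indirect path to registers that sit behind the CON_CTL/CON_DAT pair: stage or fetch the data word in CON_DAT, issue a command word through CON_CTL, and spin on CON_CTL_BUSY until the controller retires it. A minimal sketch reconstructed from the matched lines (the declaration of the flags parameter falls on an unmatched continuation line and is assumed):

	static void
	he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
			   unsigned flags)
	{
		he_writel(he_dev, val, CON_DAT);
		(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
		he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
		while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY)
			;	/* busy-wait until the indirect write retires */
	}

	static unsigned
	he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
	{
		he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
		while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY)
			;	/* busy-wait until the read data lands in CON_DAT */
		return he_readl(he_dev, CON_DAT);
	}
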
324 __find_vcc(struct he_dev *he_dev, unsigned cid) in __find_vcc() argument
331 vpi = cid >> he_dev->vcibits; in __find_vcc()
332 vci = cid & ((1 << he_dev->vcibits) - 1); in __find_vcc()
337 vcc->dev == he_dev->atm_dev && in __find_vcc()
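
__find_vcc() recovers VPI/VCI from a connection id using the per-device vcibits, the inverse of the he_mkcid() packing used later by he_open()/he_close()/he_send(). A hedged sketch of the mapping; he_mkcid() itself is not part of this listing, so the packing shown is an assumption consistent with the split on lines 331-332:

	/* assumed: cid = (vpi << he_dev->vcibits) | vci */
	unsigned vpi = cid >> he_dev->vcibits;
	unsigned vci = cid & ((1 << he_dev->vcibits) - 1);
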
350 struct he_dev *he_dev = NULL; in he_init_one() local
370 he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev), in he_init_one()
372 if (!he_dev) { in he_init_one()
376 memset(he_dev, 0, sizeof(struct he_dev)); in he_init_one()
378 he_dev->pci_dev = pci_dev; in he_init_one()
379 he_dev->atm_dev = atm_dev; in he_init_one()
380 he_dev->atm_dev->dev_data = he_dev; in he_init_one()
381 atm_dev->dev_data = he_dev; in he_init_one()
382 he_dev->number = atm_dev->number; in he_init_one()
384 he_stop(he_dev); in he_init_one()
388 he_dev->next = NULL; in he_init_one()
390 he_dev->next = he_devs; in he_init_one()
391 he_devs = he_dev; in he_init_one()
397 if (he_dev) in he_init_one()
398 kfree(he_dev); in he_init_one()
407 struct he_dev *he_dev; in he_remove_one() local
410 he_dev = HE_DEV(atm_dev); in he_remove_one()
414 he_stop(he_dev); in he_remove_one()
416 kfree(he_dev); in he_remove_one()
443 he_init_rx_lbfp0(struct he_dev *he_dev) in he_init_rx_lbfp0() argument
446 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_init_rx_lbfp0()
447 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; in he_init_rx_lbfp0()
448 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row; in he_init_rx_lbfp0()
451 lbm_offset = he_readl(he_dev, RCMLBM_BA); in he_init_rx_lbfp0()
453 he_writel(he_dev, lbufd_index, RLBF0_H); in he_init_rx_lbfp0()
455 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) { in he_init_rx_lbfp0()
459 he_writel_rcm(he_dev, lbuf_addr, lbm_offset); in he_init_rx_lbfp0()
460 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); in he_init_rx_lbfp0()
464 row_offset += he_dev->bytes_per_row; in he_init_rx_lbfp0()
469 he_writel(he_dev, lbufd_index - 2, RLBF0_T); in he_init_rx_lbfp0()
470 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C); in he_init_rx_lbfp0()
474 he_init_rx_lbfp1(struct he_dev *he_dev) in he_init_rx_lbfp1() argument
477 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_init_rx_lbfp1()
478 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; in he_init_rx_lbfp1()
479 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row; in he_init_rx_lbfp1()
482 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index); in he_init_rx_lbfp1()
484 he_writel(he_dev, lbufd_index, RLBF1_H); in he_init_rx_lbfp1()
486 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) { in he_init_rx_lbfp1()
490 he_writel_rcm(he_dev, lbuf_addr, lbm_offset); in he_init_rx_lbfp1()
491 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); in he_init_rx_lbfp1()
495 row_offset += he_dev->bytes_per_row; in he_init_rx_lbfp1()
500 he_writel(he_dev, lbufd_index - 2, RLBF1_T); in he_init_rx_lbfp1()
501 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C); in he_init_rx_lbfp1()
505 he_init_tx_lbfp(struct he_dev *he_dev) in he_init_tx_lbfp() argument
508 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_init_tx_lbfp()
509 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; in he_init_tx_lbfp()
510 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row; in he_init_tx_lbfp()
512 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs; in he_init_tx_lbfp()
513 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index); in he_init_tx_lbfp()
515 he_writel(he_dev, lbufd_index, TLBF_H); in he_init_tx_lbfp()
517 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) { in he_init_tx_lbfp()
521 he_writel_rcm(he_dev, lbuf_addr, lbm_offset); in he_init_tx_lbfp()
522 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); in he_init_tx_lbfp()
526 row_offset += he_dev->bytes_per_row; in he_init_tx_lbfp()
531 he_writel(he_dev, lbufd_index - 1, TLBF_T); in he_init_tx_lbfp()
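
he_init_rx_lbfp0(), he_init_rx_lbfp1() and he_init_tx_lbfp() all derive the same local-buffer geometry from cells_per_row, cells_per_lbuf and bytes_per_row. A small standalone sketch of that arithmetic, using the values he_start() programs further down in this listing (lines 1229-1250); ATM_CELL_PAYLOAD is the standard 48-byte cell payload:

	#include <stdio.h>

	#define ATM_CELL_PAYLOAD 48

	int main(void)
	{
		/* values taken from the he_start() matches below */
		unsigned cells_per_lbuf = 4;
		unsigned cells_per_row_622 = 40, cells_per_row_155 = 20;
		unsigned lbuf_bufsize = cells_per_lbuf * ATM_CELL_PAYLOAD;

		printf("lbuf_bufsize      = %u bytes\n", lbuf_bufsize);                  /* 192 */
		printf("lbufs_per_row 622 = %u\n", cells_per_row_622 / cells_per_lbuf); /* 10 */
		printf("lbufs_per_row 155 = %u\n", cells_per_row_155 / cells_per_lbuf); /*  5 */
		return 0;
	}
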
535 he_init_tpdrq(struct he_dev *he_dev) in he_init_tpdrq() argument
537 he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev, in he_init_tpdrq()
538 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys); in he_init_tpdrq()
539 if (he_dev->tpdrq_base == NULL) { in he_init_tpdrq()
543 memset(he_dev->tpdrq_base, 0, in he_init_tpdrq()
546 he_dev->tpdrq_tail = he_dev->tpdrq_base; in he_init_tpdrq()
547 he_dev->tpdrq_head = he_dev->tpdrq_base; in he_init_tpdrq()
549 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H); in he_init_tpdrq()
550 he_writel(he_dev, 0, TPDRQ_T); in he_init_tpdrq()
551 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S); in he_init_tpdrq()
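
he_init_tpdrq() shows the queue-setup pattern this driver repeats for every host ring: allocate a DMA-consistent area, zero it, point head and tail at the base, then program the base, tail and size registers. A sketch assembled from lines 537-551 above (the failure message and return value are not in the listing and are assumptions):

	static int
	he_init_tpdrq(struct he_dev *he_dev)
	{
		he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
		if (he_dev->tpdrq_base == NULL) {
			hprintk("failed to alloc tpdrq\n");	/* assumed message */
			return -ENOMEM;				/* assumed errno */
		}
		memset(he_dev->tpdrq_base, 0,
			CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

		he_dev->tpdrq_tail = he_dev->tpdrq_base;
		he_dev->tpdrq_head = he_dev->tpdrq_base;

		he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
		he_writel(he_dev, 0, TPDRQ_T);
		he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
		return 0;
	}
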
557 he_init_cs_block(struct he_dev *he_dev) in he_init_cs_block() argument
565 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg); in he_init_cs_block()
569 clock = he_is622(he_dev) ? 66667000 : 50000000; in he_init_cs_block()
570 rate = he_dev->atm_dev->link_rate; in he_init_cs_block()
581 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg); in he_init_cs_block()
585 if (he_is622(he_dev)) { in he_init_cs_block()
587 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0); in he_init_cs_block()
588 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1); in he_init_cs_block()
589 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2); in he_init_cs_block()
590 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3); in he_init_cs_block()
591 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4); in he_init_cs_block()
594 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0); in he_init_cs_block()
595 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1); in he_init_cs_block()
596 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2); in he_init_cs_block()
597 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0); in he_init_cs_block()
598 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1); in he_init_cs_block()
599 he_writel_mbox(he_dev, 0x14585, CS_RTFWR); in he_init_cs_block()
601 he_writel_mbox(he_dev, 0x4680, CS_RTATR); in he_init_cs_block()
604 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET); in he_init_cs_block()
605 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX); in he_init_cs_block()
606 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN); in he_init_cs_block()
607 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC); in he_init_cs_block()
608 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC); in he_init_cs_block()
609 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL); in he_init_cs_block()
612 he_writel_mbox(he_dev, 0x5, CS_OTPPER); in he_init_cs_block()
613 he_writel_mbox(he_dev, 0x14, CS_OTWPER); in he_init_cs_block()
616 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0); in he_init_cs_block()
617 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1); in he_init_cs_block()
618 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2); in he_init_cs_block()
619 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3); in he_init_cs_block()
620 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4); in he_init_cs_block()
623 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0); in he_init_cs_block()
624 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1); in he_init_cs_block()
625 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2); in he_init_cs_block()
626 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0); in he_init_cs_block()
627 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1); in he_init_cs_block()
628 he_writel_mbox(he_dev, 0xf424, CS_RTFWR); in he_init_cs_block()
630 he_writel_mbox(he_dev, 0x4680, CS_RTATR); in he_init_cs_block()
633 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET); in he_init_cs_block()
634 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX); in he_init_cs_block()
635 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN); in he_init_cs_block()
636 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC); in he_init_cs_block()
637 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC); in he_init_cs_block()
638 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL); in he_init_cs_block()
641 he_writel_mbox(he_dev, 0x6, CS_OTPPER); in he_init_cs_block()
642 he_writel_mbox(he_dev, 0x1e, CS_OTWPER); in he_init_cs_block()
645 he_writel_mbox(he_dev, 0x8, CS_OTTLIM); in he_init_cs_block()
648 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg); in he_init_cs_block()
653 he_init_cs_block_rcm(struct he_dev *he_dev) in he_init_cs_block_rcm() argument
670 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg); in he_init_cs_block_rcm()
675 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg); in he_init_cs_block_rcm()
685 rate = he_dev->atm_dev->link_rate; in he_init_cs_block_rcm()
740 buf = rate_cps * he_dev->tx_numbuffs / in he_init_cs_block_rcm()
741 (he_dev->atm_dev->link_rate * 2); in he_init_cs_block_rcm()
744 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR; in he_init_cs_block_rcm()
763 he_writel_rcm(he_dev, reg, in he_init_cs_block_rcm()
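
Two derived values steer the ABR tables in he_init_cs_block_rcm(): mult (line 744) normalizes the link rate to OC-3 units, and buf (lines 740-741) scales the transmit buffer count by the fraction of line rate a given cell rate represents. Worked numbers, assuming the usual <linux/atm.h> PCR constants, which are not part of this listing:

	/* assumed constants: ATM_OC3_PCR = 353207, ATM_OC12_PCR = 1412830 cells/s */
	mult = 1412830 / 353207;	/* OC-12 card: mult == 4 */
	mult =  353207 / 353207;	/* OC-3  card: mult == 1 */

	/* line 740: a connection at half the link rate on an OC-12 card with
	 * tx_numbuffs == 5120 gets buf = (link_rate/2) * 5120 / (link_rate * 2),
	 * i.e. 1280 lbufs. */
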
774 he_init_group(struct he_dev *he_dev, int group) in he_init_group() argument
781 he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev, in he_init_group()
783 if (he_dev->rbps_pool == NULL) { in he_init_group()
788 he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev, in he_init_group()
789 CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys); in he_init_group()
790 if (he_dev->rbps_pages == NULL) { in he_init_group()
796 he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev, in he_init_group()
797 CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys); in he_init_group()
798 if (he_dev->rbps_base == NULL) { in he_init_group()
802 memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp)); in he_init_group()
803 he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL); in he_init_group()
810 cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle); in he_init_group()
814 cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE); in he_init_group()
815 dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE); in he_init_group()
818 he_dev->rbps_virt[i].virt = cpuaddr; in he_init_group()
819 he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF); in he_init_group()
820 he_dev->rbps_base[i].phys = dma_handle; in he_init_group()
823 he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1]; in he_init_group()
825 he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32)); in he_init_group()
826 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), in he_init_group()
828 he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4, in he_init_group()
830 he_writel(he_dev, in he_init_group()
836 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32)); in he_init_group()
837 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32)); in he_init_group()
838 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32)); in he_init_group()
839 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), in he_init_group()
845 he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev, in he_init_group()
847 if (he_dev->rbpl_pool == NULL) { in he_init_group()
852 he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev, in he_init_group()
853 CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys); in he_init_group()
854 if (he_dev->rbpl_pages == NULL) { in he_init_group()
860 he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev, in he_init_group()
861 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys); in he_init_group()
862 if (he_dev->rbpl_base == NULL) { in he_init_group()
866 memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp)); in he_init_group()
867 he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL); in he_init_group()
874 cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle); in he_init_group()
878 cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE); in he_init_group()
879 dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE); in he_init_group()
882 he_dev->rbpl_virt[i].virt = cpuaddr; in he_init_group()
883 he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF); in he_init_group()
884 he_dev->rbpl_base[i].phys = dma_handle; in he_init_group()
886 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1]; in he_init_group()
888 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32)); in he_init_group()
889 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), in he_init_group()
891 he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4, in he_init_group()
893 he_writel(he_dev, in he_init_group()
901 he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev, in he_init_group()
902 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys); in he_init_group()
903 if (he_dev->rbrq_base == NULL) { in he_init_group()
907 memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq)); in he_init_group()
909 he_dev->rbrq_head = he_dev->rbrq_base; in he_init_group()
910 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16)); in he_init_group()
911 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16)); in he_init_group()
912 he_writel(he_dev, in he_init_group()
917 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7), in he_init_group()
920 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1), in he_init_group()
925 he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev, in he_init_group()
926 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys); in he_init_group()
927 if (he_dev->tbrq_base == NULL) { in he_init_group()
931 memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq)); in he_init_group()
933 he_dev->tbrq_head = he_dev->tbrq_base; in he_init_group()
935 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16)); in he_init_group()
936 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16)); in he_init_group()
937 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16)); in he_init_group()
938 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16)); in he_init_group()
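
he_init_group() builds, per group, a small and a large receive-buffer pool (a pci_pool for the buffers plus a DMA-consistent array of he_rbp descriptors each), then the RBRQ and TBRQ completion rings. The per-group register blocks sit at fixed strides, pulled out of the lines above in this short sketch:

	/* base registers programmed for one group, from lines 825-938 above */
	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S   + (group * 32));	/* small rx buffers */
	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S   + (group * 32));	/* large rx buffers */
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST  + (group * 16));	/* rx completion ring */
	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));	/* tx completion ring */
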
944 he_init_irq(struct he_dev *he_dev) in he_init_irq() argument
951 he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev, in he_init_irq()
952 (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys); in he_init_irq()
953 if (he_dev->irq_base == NULL) { in he_init_irq()
957 he_dev->irq_tailoffset = (unsigned *) in he_init_irq()
958 &he_dev->irq_base[CONFIG_IRQ_SIZE]; in he_init_irq()
959 *he_dev->irq_tailoffset = 0; in he_init_irq()
960 he_dev->irq_head = he_dev->irq_base; in he_init_irq()
961 he_dev->irq_tail = he_dev->irq_base; in he_init_irq()
964 he_dev->irq_base[i].isw = ITYPE_INVALID; in he_init_irq()
966 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE); in he_init_irq()
967 he_writel(he_dev, in he_init_irq()
970 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL); in he_init_irq()
971 he_writel(he_dev, 0x0, IRQ0_DATA); in he_init_irq()
973 he_writel(he_dev, 0x0, IRQ1_BASE); in he_init_irq()
974 he_writel(he_dev, 0x0, IRQ1_HEAD); in he_init_irq()
975 he_writel(he_dev, 0x0, IRQ1_CNTL); in he_init_irq()
976 he_writel(he_dev, 0x0, IRQ1_DATA); in he_init_irq()
978 he_writel(he_dev, 0x0, IRQ2_BASE); in he_init_irq()
979 he_writel(he_dev, 0x0, IRQ2_HEAD); in he_init_irq()
980 he_writel(he_dev, 0x0, IRQ2_CNTL); in he_init_irq()
981 he_writel(he_dev, 0x0, IRQ2_DATA); in he_init_irq()
983 he_writel(he_dev, 0x0, IRQ3_BASE); in he_init_irq()
984 he_writel(he_dev, 0x0, IRQ3_HEAD); in he_init_irq()
985 he_writel(he_dev, 0x0, IRQ3_CNTL); in he_init_irq()
986 he_writel(he_dev, 0x0, IRQ3_DATA); in he_init_irq()
990 he_writel(he_dev, 0x0, GRP_10_MAP); in he_init_irq()
991 he_writel(he_dev, 0x0, GRP_32_MAP); in he_init_irq()
992 he_writel(he_dev, 0x0, GRP_54_MAP); in he_init_irq()
993 he_writel(he_dev, 0x0, GRP_76_MAP); in he_init_irq()
995 if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) { in he_init_irq()
996 hprintk("irq %d already in use\n", he_dev->pci_dev->irq); in he_init_irq()
1000 he_dev->irq = he_dev->pci_dev->irq; in he_init_irq()
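
he_init_irq() allocates CONFIG_IRQ_SIZE + 1 entries: the extra slot is the tail offset word the adapter advances as it posts events, which he_irq_handler() reads later in this listing. A condensed sketch from lines 951-1000 above (the loop bound, the omitted IRQ0_HEAD programming and the error return are assumptions):

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
		(CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq), &he_dev->irq_phys);

	he_dev->irq_tailoffset = (unsigned *) &he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)		/* loop bound assumed */
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler,
			SA_INTERRUPT | SA_SHIRQ, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EIO;				/* assumed errno */
	}
	he_dev->irq = he_dev->pci_dev->irq;
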
1008 struct he_dev *he_dev; in he_start() local
1019 he_dev = HE_DEV(dev); in he_start()
1020 pci_dev = he_dev->pci_dev; in he_start()
1022 he_dev->membase = pci_dev->resource[0].start; in he_start()
1023 HPRINTK("membase = 0x%lx irq = %d.\n", he_dev->membase, pci_dev->irq); in he_start()
1083 if (!(he_dev->membase = (unsigned long) ioremap(he_dev->membase, HE_REGMAP_SIZE))) { in he_start()
1089 he_writel(he_dev, 0x0, RESET_CNTL); in he_start()
1090 he_writel(he_dev, 0xff, RESET_CNTL); in he_start()
1093 status = he_readl(he_dev, RESET_CNTL); in he_start()
1100 host_cntl = he_readl(he_dev, HOST_CNTL); in he_start()
1118 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i); in he_start()
1120 he_dev->media = read_prom_byte(he_dev, MEDIA); in he_start()
1123 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i); in he_start()
1126 he_dev->prod_id, in he_start()
1127 he_dev->media & 0x40 ? "SM" : "MM", in he_start()
1134 he_dev->atm_dev->link_rate = he_is622(he_dev) ? in he_start()
1138 lb_swap = he_readl(he_dev, LB_SWAP); in he_start()
1139 if (he_is622(he_dev)) in he_start()
1149 he_writel(he_dev, lb_swap, LB_SWAP); in he_start()
1152 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL); in he_start()
1156 he_writel(he_dev, lb_swap, LB_SWAP); in he_start()
1159 if ((err = he_init_irq(he_dev)) != 0) in he_start()
1163 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev); in he_start()
1165 spin_lock_init(&he_dev->global_lock); in he_start()
1170 he_writel(he_dev, host_cntl, HOST_CNTL); in he_start()
1210 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS; in he_start()
1211 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS; in he_start()
1219 he_dev->vpibits = nvpibits; in he_start()
1220 he_dev->vcibits = HE_MAXCIDBITS - nvpibits; in he_start()
1224 he_dev->vcibits = nvcibits; in he_start()
1225 he_dev->vpibits = HE_MAXCIDBITS - nvcibits; in he_start()
1229 if (he_is622(he_dev)) { in he_start()
1230 he_dev->cells_per_row = 40; in he_start()
1231 he_dev->bytes_per_row = 2048; in he_start()
1232 he_dev->r0_numrows = 256; in he_start()
1233 he_dev->tx_numrows = 512; in he_start()
1234 he_dev->r1_numrows = 256; in he_start()
1235 he_dev->r0_startrow = 0; in he_start()
1236 he_dev->tx_startrow = 256; in he_start()
1237 he_dev->r1_startrow = 768; in he_start()
1239 he_dev->cells_per_row = 20; in he_start()
1240 he_dev->bytes_per_row = 1024; in he_start()
1241 he_dev->r0_numrows = 512; in he_start()
1242 he_dev->tx_numrows = 1018; in he_start()
1243 he_dev->r1_numrows = 512; in he_start()
1244 he_dev->r0_startrow = 6; in he_start()
1245 he_dev->tx_startrow = 518; in he_start()
1246 he_dev->r1_startrow = 1536; in he_start()
1249 he_dev->cells_per_lbuf = 4; in he_start()
1250 he_dev->buffer_limit = 4; in he_start()
1251 he_dev->r0_numbuffs = he_dev->r0_numrows * in he_start()
1252 he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_start()
1253 if (he_dev->r0_numbuffs > 2560) in he_start()
1254 he_dev->r0_numbuffs = 2560; in he_start()
1256 he_dev->r1_numbuffs = he_dev->r1_numrows * in he_start()
1257 he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_start()
1258 if (he_dev->r1_numbuffs > 2560) in he_start()
1259 he_dev->r1_numbuffs = 2560; in he_start()
1261 he_dev->tx_numbuffs = he_dev->tx_numrows * in he_start()
1262 he_dev->cells_per_row / he_dev->cells_per_lbuf; in he_start()
1263 if (he_dev->tx_numbuffs > 5120) in he_start()
1264 he_dev->tx_numbuffs = 5120; in he_start()
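
The buffer geometry above reduces to simple arithmetic. A standalone sketch that reproduces the r0/r1/tx buffer counts and their clamps for both card variants, with the values copied from lines 1229-1264:

	#include <stdio.h>

	static void geometry(const char *name, unsigned cells_per_row,
			     unsigned r0_rows, unsigned r1_rows, unsigned tx_rows)
	{
		unsigned cells_per_lbuf = 4;
		unsigned r0 = r0_rows * cells_per_row / cells_per_lbuf;
		unsigned r1 = r1_rows * cells_per_row / cells_per_lbuf;
		unsigned tx = tx_rows * cells_per_row / cells_per_lbuf;

		if (r0 > 2560) r0 = 2560;	/* clamps from lines 1253-1264 */
		if (r1 > 2560) r1 = 2560;
		if (tx > 5120) tx = 5120;
		printf("%s: r0=%u r1=%u tx=%u lbufs\n", name, r0, r1, tx);
	}

	int main(void)
	{
		geometry("OC-12 (622)", 40, 256, 256, 512);	/* 2560 / 2560 / 5120 */
		geometry("OC-3  (155)", 20, 512, 512, 1018);	/* 2560 / 2560 / 5090 */
		return 0;
	}
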
1268 he_writel(he_dev, in he_start()
1271 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) | in he_start()
1272 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)), in he_start()
1275 he_writel(he_dev, BANK_ON | in he_start()
1276 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)), in he_start()
1279 he_writel(he_dev, in he_start()
1280 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) | in he_start()
1282 he_writel(he_dev, in he_start()
1283 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) | in he_start()
1286 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG); in he_start()
1288 he_writel(he_dev, in he_start()
1289 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) | in he_start()
1290 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) | in he_start()
1291 RX_VALVP(he_dev->vpibits) | in he_start()
1292 RX_VALVC(he_dev->vcibits), RC_CONFIG); in he_start()
1294 he_writel(he_dev, DRF_THRESH(0x20) | in he_start()
1295 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) | in he_start()
1296 TX_VCI_MASK(he_dev->vcibits) | in he_start()
1297 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG); in he_start()
1299 he_writel(he_dev, 0x0, TXAAL5_PROTO); in he_start()
1301 he_writel(he_dev, PHY_INT_ENB | in he_start()
1302 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)), in he_start()
1308 he_writel_tcm(he_dev, 0, i); in he_start()
1311 he_writel_rcm(he_dev, 0, i); in he_start()
1344 he_writel(he_dev, CONFIG_TSRB, TSRB_BA); in he_start()
1345 he_writel(he_dev, CONFIG_TSRC, TSRC_BA); in he_start()
1346 he_writel(he_dev, CONFIG_TSRD, TSRD_BA); in he_start()
1347 he_writel(he_dev, CONFIG_TMABR, TMABR_BA); in he_start()
1348 he_writel(he_dev, CONFIG_TPDBA, TPD_BA); in he_start()
1378 he_writel(he_dev, 0x08000, RCMLBM_BA); in he_start()
1379 he_writel(he_dev, 0x0e000, RCMRSRB_BA); in he_start()
1380 he_writel(he_dev, 0x0d800, RCMABR_BA); in he_start()
1384 he_init_rx_lbfp0(he_dev); in he_start()
1385 he_init_rx_lbfp1(he_dev); in he_start()
1387 he_writel(he_dev, 0x0, RLBC_H); in he_start()
1388 he_writel(he_dev, 0x0, RLBC_T); in he_start()
1389 he_writel(he_dev, 0x0, RLBC_H2); in he_start()
1391 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */ in he_start()
1392 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */ in he_start()
1394 he_init_tx_lbfp(he_dev); in he_start()
1396 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA); in he_start()
1400 if (he_is622(he_dev)) { in he_start()
1401 he_writel(he_dev, 0x000f, G0_INMQ_S); in he_start()
1402 he_writel(he_dev, 0x200f, G0_INMQ_L); in he_start()
1404 he_writel(he_dev, 0x001f, G1_INMQ_S); in he_start()
1405 he_writel(he_dev, 0x201f, G1_INMQ_L); in he_start()
1407 he_writel(he_dev, 0x002f, G2_INMQ_S); in he_start()
1408 he_writel(he_dev, 0x202f, G2_INMQ_L); in he_start()
1410 he_writel(he_dev, 0x003f, G3_INMQ_S); in he_start()
1411 he_writel(he_dev, 0x203f, G3_INMQ_L); in he_start()
1413 he_writel(he_dev, 0x004f, G4_INMQ_S); in he_start()
1414 he_writel(he_dev, 0x204f, G4_INMQ_L); in he_start()
1416 he_writel(he_dev, 0x005f, G5_INMQ_S); in he_start()
1417 he_writel(he_dev, 0x205f, G5_INMQ_L); in he_start()
1419 he_writel(he_dev, 0x006f, G6_INMQ_S); in he_start()
1420 he_writel(he_dev, 0x206f, G6_INMQ_L); in he_start()
1422 he_writel(he_dev, 0x007f, G7_INMQ_S); in he_start()
1423 he_writel(he_dev, 0x207f, G7_INMQ_L); in he_start()
1425 he_writel(he_dev, 0x0000, G0_INMQ_S); in he_start()
1426 he_writel(he_dev, 0x0008, G0_INMQ_L); in he_start()
1428 he_writel(he_dev, 0x0001, G1_INMQ_S); in he_start()
1429 he_writel(he_dev, 0x0009, G1_INMQ_L); in he_start()
1431 he_writel(he_dev, 0x0002, G2_INMQ_S); in he_start()
1432 he_writel(he_dev, 0x000a, G2_INMQ_L); in he_start()
1434 he_writel(he_dev, 0x0003, G3_INMQ_S); in he_start()
1435 he_writel(he_dev, 0x000b, G3_INMQ_L); in he_start()
1437 he_writel(he_dev, 0x0004, G4_INMQ_S); in he_start()
1438 he_writel(he_dev, 0x000c, G4_INMQ_L); in he_start()
1440 he_writel(he_dev, 0x0005, G5_INMQ_S); in he_start()
1441 he_writel(he_dev, 0x000d, G5_INMQ_L); in he_start()
1443 he_writel(he_dev, 0x0006, G6_INMQ_S); in he_start()
1444 he_writel(he_dev, 0x000e, G6_INMQ_L); in he_start()
1446 he_writel(he_dev, 0x0007, G7_INMQ_S); in he_start()
1447 he_writel(he_dev, 0x000f, G7_INMQ_L); in he_start()
1452 he_writel(he_dev, 0x0, MCC); in he_start()
1453 he_writel(he_dev, 0x0, OEC); in he_start()
1454 he_writel(he_dev, 0x0, DCC); in he_start()
1455 he_writel(he_dev, 0x0, CEC); in he_start()
1459 he_init_cs_block(he_dev); in he_start()
1463 if (he_init_cs_block_rcm(he_dev) < 0) in he_start()
1468 he_init_tpdrq(he_dev); in he_start()
1471 he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev, in he_start()
1473 if (he_dev->tpd_pool == NULL) { in he_start()
1478 INIT_LIST_HEAD(&he_dev->outstanding_tpds); in he_start()
1480 he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev, in he_start()
1481 CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys); in he_start()
1482 if (!he_dev->tpd_base) in he_start()
1486 he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT); in he_start()
1487 he_dev->tpd_base[i].inuse = 0; in he_start()
1490 he_dev->tpd_head = he_dev->tpd_base; in he_start()
1491 he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1]; in he_start()
1494 if (he_init_group(he_dev, 0) != 0) in he_start()
1498 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32)); in he_start()
1499 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32)); in he_start()
1500 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32)); in he_start()
1501 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), in he_start()
1504 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32)); in he_start()
1505 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32)); in he_start()
1506 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), in he_start()
1508 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32)); in he_start()
1510 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16)); in he_start()
1511 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16)); in he_start()
1512 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0), in he_start()
1514 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16)); in he_start()
1516 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16)); in he_start()
1517 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16)); in he_start()
1518 he_writel(he_dev, TBRQ_THRESH(0x1), in he_start()
1520 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16)); in he_start()
1525 he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev, in he_start()
1526 sizeof(struct he_hsp), &he_dev->hsp_phys); in he_start()
1527 if (he_dev->hsp == NULL) { in he_start()
1531 memset(he_dev->hsp, 0, sizeof(struct he_hsp)); in he_start()
1532 he_writel(he_dev, he_dev->hsp_phys, HSP_BA); in he_start()
1537 suni_init(he_dev->atm_dev); in he_start()
1538 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start) in he_start()
1539 he_dev->atm_dev->phy->start(he_dev->atm_dev); in he_start()
1547 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM); in he_start()
1549 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM); in he_start()
1554 reg = he_readl_mbox(he_dev, CS_ERCTL0); in he_start()
1556 he_writel_mbox(he_dev, reg, CS_ERCTL0); in he_start()
1558 reg = he_readl(he_dev, RC_CONFIG); in he_start()
1560 he_writel(he_dev, reg, RC_CONFIG); in he_start()
1563 he_dev->cs_stper[i].inuse = 0; in he_start()
1564 he_dev->cs_stper[i].pcr = -1; in he_start()
1566 he_dev->total_bw = 0; in he_start()
1571 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits; in he_start()
1572 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits; in he_start()
1574 he_dev->irq_peak = 0; in he_start()
1575 he_dev->rbrq_peak = 0; in he_start()
1576 he_dev->rbpl_peak = 0; in he_start()
1577 he_dev->tbrq_peak = 0; in he_start()
1585 he_stop(struct he_dev *he_dev) in he_stop() argument
1591 pci_dev = he_dev->pci_dev; in he_stop()
1595 if (he_dev->membase) { in he_stop()
1601 tasklet_disable(&he_dev->tasklet); in he_stop()
1606 reg = he_readl_mbox(he_dev, CS_ERCTL0); in he_stop()
1608 he_writel_mbox(he_dev, reg, CS_ERCTL0); in he_stop()
1610 reg = he_readl(he_dev, RC_CONFIG); in he_stop()
1612 he_writel(he_dev, reg, RC_CONFIG); in he_stop()
1616 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop) in he_stop()
1617 he_dev->atm_dev->phy->stop(he_dev->atm_dev); in he_stop()
1620 if (he_dev->irq) in he_stop()
1621 free_irq(he_dev->irq, he_dev); in he_stop()
1623 if (he_dev->irq_base) in he_stop()
1624 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1) in he_stop()
1625 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys); in he_stop()
1627 if (he_dev->hsp) in he_stop()
1628 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp), in he_stop()
1629 he_dev->hsp, he_dev->hsp_phys); in he_stop()
1631 if (he_dev->rbpl_base) { in he_stop()
1634 void *cpuaddr = he_dev->rbpl_virt[i].virt; in he_stop()
1635 dma_addr_t dma_handle = he_dev->rbpl_base[i].phys; in he_stop()
1637 pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle); in he_stop()
1640 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE in he_stop()
1641 * CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys); in he_stop()
1643 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE in he_stop()
1644 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys); in he_stop()
1648 if (he_dev->rbpl_pool) in he_stop()
1649 pci_pool_destroy(he_dev->rbpl_pool); in he_stop()
1653 if (he_dev->rbps_base) { in he_stop()
1656 void *cpuaddr = he_dev->rbps_virt[i].virt; in he_stop()
1657 dma_addr_t dma_handle = he_dev->rbps_base[i].phys; in he_stop()
1659 pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle); in he_stop()
1662 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE in he_stop()
1663 * CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys); in he_stop()
1665 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE in he_stop()
1666 * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys); in he_stop()
1670 if (he_dev->rbps_pool) in he_stop()
1671 pci_pool_destroy(he_dev->rbps_pool); in he_stop()
1676 if (he_dev->rbrq_base) in he_stop()
1677 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), in he_stop()
1678 he_dev->rbrq_base, he_dev->rbrq_phys); in he_stop()
1680 if (he_dev->tbrq_base) in he_stop()
1681 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), in he_stop()
1682 he_dev->tbrq_base, he_dev->tbrq_phys); in he_stop()
1684 if (he_dev->tpdrq_base) in he_stop()
1685 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), in he_stop()
1686 he_dev->tpdrq_base, he_dev->tpdrq_phys); in he_stop()
1689 if (he_dev->tpd_pool) in he_stop()
1690 pci_pool_destroy(he_dev->tpd_pool); in he_stop()
1692 if (he_dev->tpd_base) in he_stop()
1693 pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd), in he_stop()
1694 he_dev->tpd_base, he_dev->tpd_base_phys); in he_stop()
1697 if (he_dev->pci_dev) { in he_stop()
1698 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command); in he_stop()
1700 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command); in he_stop()
1703 if (he_dev->membase) in he_stop()
1704 iounmap((void *) he_dev->membase); in he_stop()
1708 __alloc_tpd(struct he_dev *he_dev) in __alloc_tpd() argument
1714 tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle); in __alloc_tpd()
1729 ++he_dev->tpd_head; in __alloc_tpd()
1730 if (he_dev->tpd_head > he_dev->tpd_end) { in __alloc_tpd()
1731 he_dev->tpd_head = he_dev->tpd_base; in __alloc_tpd()
1734 if (!he_dev->tpd_head->inuse) { in __alloc_tpd()
1735 he_dev->tpd_head->inuse = 1; in __alloc_tpd()
1736 he_dev->tpd_head->status &= TPD_MASK; in __alloc_tpd()
1737 he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0; in __alloc_tpd()
1738 he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0; in __alloc_tpd()
1739 he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0; in __alloc_tpd()
1740 return he_dev->tpd_head; in __alloc_tpd()
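
__alloc_tpd() first tries the DMA pool (line 1714) and otherwise falls back to recycling the static tpd_base ring: advance tpd_head with wrap-around and take the first descriptor not marked inuse. A sketch of that fallback from lines 1729-1740 above:

	++he_dev->tpd_head;
	if (he_dev->tpd_head > he_dev->tpd_end)
		he_dev->tpd_head = he_dev->tpd_base;		/* wrap the ring */

	if (!he_dev->tpd_head->inuse) {
		he_dev->tpd_head->inuse = 1;
		he_dev->tpd_head->status &= TPD_MASK;		/* drop stale flag bits (assumed intent) */
		he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
		he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
		he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
		return he_dev->tpd_head;
	}
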
1763 he_service_rbrq(struct he_dev *he_dev, int group) in he_service_rbrq() argument
1766 ((unsigned long)he_dev->rbrq_base | in he_service_rbrq()
1767 he_dev->hsp->group[group].rbrq_tail); in he_service_rbrq()
1779 while (he_dev->rbrq_head != rbrq_tail) { in he_service_rbrq()
1783 he_dev->rbrq_head, group, in he_service_rbrq()
1784 RBRQ_ADDR(he_dev->rbrq_head), in he_service_rbrq()
1785 RBRQ_BUFLEN(he_dev->rbrq_head), in he_service_rbrq()
1786 RBRQ_CID(he_dev->rbrq_head), in he_service_rbrq()
1787 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "", in he_service_rbrq()
1788 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "", in he_service_rbrq()
1789 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "", in he_service_rbrq()
1790 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "", in he_service_rbrq()
1791 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "", in he_service_rbrq()
1792 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : ""); in he_service_rbrq()
1795 if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF) in he_service_rbrq()
1796 rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))]; in he_service_rbrq()
1799 rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))]; in he_service_rbrq()
1801 buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4; in he_service_rbrq()
1802 cid = RBRQ_CID(he_dev->rbrq_head); in he_service_rbrq()
1805 vcc = __find_vcc(he_dev, cid); in he_service_rbrq()
1810 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) in he_service_rbrq()
1819 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) in he_service_rbrq()
1824 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { in he_service_rbrq()
1830 he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head); in he_service_rbrq()
1835 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) { in he_service_rbrq()
1848 if (!RBRQ_END_PDU(he_dev->rbrq_head)) in he_service_rbrq()
1851 if (RBRQ_LEN_ERR(he_dev->rbrq_head) in he_service_rbrq()
1852 || RBRQ_CRC_ERR(he_dev->rbrq_head)) { in he_service_rbrq()
1854 RBRQ_CRC_ERR(he_dev->rbrq_head) in he_service_rbrq()
1856 RBRQ_LEN_ERR(he_dev->rbrq_head) in he_service_rbrq()
1880 he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len); in he_service_rbrq()
1884 he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len); in he_service_rbrq()
1927 rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)]; in he_service_rbrq()
1930 rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)]; in he_service_rbrq()
1939 he_dev->rbrq_head = (struct he_rbrq *) in he_service_rbrq()
1940 ((unsigned long) he_dev->rbrq_base | in he_service_rbrq()
1941 RBRQ_MASK(++he_dev->rbrq_head)); in he_service_rbrq()
1947 if (updated > he_dev->rbrq_peak) in he_service_rbrq()
1948 he_dev->rbrq_peak = updated; in he_service_rbrq()
1950 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head), in he_service_rbrq()
1958 he_service_tbrq(struct he_dev *he_dev, int group) in he_service_tbrq() argument
1961 ((unsigned long)he_dev->tbrq_base | in he_service_tbrq()
1962 he_dev->hsp->group[group].tbrq_tail); in he_service_tbrq()
1971 while (he_dev->tbrq_head != tbrq_tail) { in he_service_tbrq()
1976 TBRQ_TPD(he_dev->tbrq_head), in he_service_tbrq()
1977 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "", in he_service_tbrq()
1978 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : ""); in he_service_tbrq()
1981 p = &he_dev->outstanding_tpds; in he_service_tbrq()
1982 while ((p = p->next) != &he_dev->outstanding_tpds) { in he_service_tbrq()
1984 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) { in he_service_tbrq()
1993 TBRQ_TPD(he_dev->tbrq_head)); in he_service_tbrq()
1997 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ]; in he_service_tbrq()
2000 if (TBRQ_EOS(he_dev->tbrq_head)) { in he_service_tbrq()
2002 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci)); in he_service_tbrq()
2011 pci_unmap_single(he_dev->pci_dev, in he_service_tbrq()
2030 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); in he_service_tbrq()
2034 he_dev->tbrq_head = (struct he_tbrq *) in he_service_tbrq()
2035 ((unsigned long) he_dev->tbrq_base | in he_service_tbrq()
2036 TBRQ_MASK(++he_dev->tbrq_head)); in he_service_tbrq()
2040 if (updated > he_dev->tbrq_peak) in he_service_tbrq()
2041 he_dev->tbrq_peak = updated; in he_service_tbrq()
2043 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head), in he_service_tbrq()
2050 he_service_rbpl(struct he_dev *he_dev, int group) in he_service_rbpl() argument
2056 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | in he_service_rbpl()
2057 RBPL_MASK(he_readl(he_dev, G0_RBPL_S))); in he_service_rbpl()
2060 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | in he_service_rbpl()
2061 RBPL_MASK(he_dev->rbpl_tail+1)); in he_service_rbpl()
2068 he_dev->rbpl_tail = newtail; in he_service_rbpl()
2073 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T); in he_service_rbpl()
2078 he_service_rbps(struct he_dev *he_dev, int group) in he_service_rbps() argument
2084 rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base | in he_service_rbps()
2085 RBPS_MASK(he_readl(he_dev, G0_RBPS_S))); in he_service_rbps()
2088 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base | in he_service_rbps()
2089 RBPS_MASK(he_dev->rbps_tail+1)); in he_service_rbps()
2096 he_dev->rbps_tail = newtail; in he_service_rbps()
2101 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T); in he_service_rbps()
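
he_service_rbpl() and he_service_rbps() refill the receive free pools the same way: read the pool head the hardware reports in G0_RBPL_S/G0_RBPS_S, walk the tail forward one slot at a time until the next slot would collide with that head, then publish the new tail. A sketch of the large-buffer case from lines 2056-2073 above; the loop construct, the moved counter and the per-buffer loaned-out check are not in the matched lines and are assumptions:

	rbpl_head = (struct he_rbp *) ((unsigned long) he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long) he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail + 1));
		if (newtail == rbpl_head)	/* pool is full again */
			break;
		he_dev->rbpl_tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
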
2109 struct he_dev *he_dev = (struct he_dev *) data; in he_tasklet() local
2115 spin_lock_irqsave(&he_dev->global_lock, flags); in he_tasklet()
2118 while (he_dev->irq_head != he_dev->irq_tail) { in he_tasklet()
2121 type = ITYPE_TYPE(he_dev->irq_head->isw); in he_tasklet()
2122 group = ITYPE_GROUP(he_dev->irq_head->isw); in he_tasklet()
2129 if (he_service_rbrq(he_dev, group)) { in he_tasklet()
2130 he_service_rbpl(he_dev, group); in he_tasklet()
2132 he_service_rbps(he_dev, group); in he_tasklet()
2140 he_service_tbrq(he_dev, group); in he_tasklet()
2143 he_service_rbpl(he_dev, group); in he_tasklet()
2147 he_service_rbps(he_dev, group); in he_tasklet()
2153 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_tasklet()
2154 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt) in he_tasklet()
2155 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev); in he_tasklet()
2156 spin_lock_irqsave(&he_dev->global_lock, flags); in he_tasklet()
2165 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR)); in he_tasklet()
2172 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw); in he_tasklet()
2174 he_service_rbrq(he_dev, 0); in he_tasklet()
2175 he_service_rbpl(he_dev, 0); in he_tasklet()
2177 he_service_rbps(he_dev, 0); in he_tasklet()
2179 he_service_tbrq(he_dev, 0); in he_tasklet()
2182 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw); in he_tasklet()
2185 he_dev->irq_head->isw = ITYPE_INVALID; in he_tasklet()
2187 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK); in he_tasklet()
2191 if (updated > he_dev->irq_peak) in he_tasklet()
2192 he_dev->irq_peak = updated; in he_tasklet()
2194 he_writel(he_dev, in he_tasklet()
2197 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD); in he_tasklet()
2198 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */ in he_tasklet()
2201 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_tasklet()
2209 struct he_dev *he_dev = (struct he_dev * )dev_id; in he_irq_handler() local
2212 if (he_dev == NULL) in he_irq_handler()
2215 spin_lock_irqsave(&he_dev->global_lock, flags); in he_irq_handler()
2217 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) | in he_irq_handler()
2218 (*he_dev->irq_tailoffset << 2)); in he_irq_handler()
2220 if (he_dev->irq_tail == he_dev->irq_head) { in he_irq_handler()
2222 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base | in he_irq_handler()
2223 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2)); in he_irq_handler()
2224 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */ in he_irq_handler()
2228 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */) in he_irq_handler()
2232 if (he_dev->irq_head != he_dev->irq_tail) { in he_irq_handler()
2235 tasklet_schedule(&he_dev->tasklet); in he_irq_handler()
2237 he_tasklet((unsigned long) he_dev); in he_irq_handler()
2239 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */ in he_irq_handler()
2240 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */ in he_irq_handler()
2242 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_irq_handler()
2248 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid) in __enqueue_tpd() argument
2253 tpd, cid, he_dev->tpdrq_tail); in __enqueue_tpd()
2256 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base | in __enqueue_tpd()
2257 TPDRQ_MASK(he_dev->tpdrq_tail+1)); in __enqueue_tpd()
2266 if (new_tail == he_dev->tpdrq_head) { in __enqueue_tpd()
2267 he_dev->tpdrq_head = (struct he_tpdrq *) in __enqueue_tpd()
2268 (((unsigned long)he_dev->tpdrq_base) | in __enqueue_tpd()
2269 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H))); in __enqueue_tpd()
2271 if (new_tail == he_dev->tpdrq_head) { in __enqueue_tpd()
2287 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); in __enqueue_tpd()
2297 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds); in __enqueue_tpd()
2298 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status); in __enqueue_tpd()
2300 he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys + in __enqueue_tpd()
2303 he_dev->tpdrq_tail->cid = cid; in __enqueue_tpd()
2306 he_dev->tpdrq_tail = new_tail; in __enqueue_tpd()
2308 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T); in __enqueue_tpd()
2309 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */ in __enqueue_tpd()
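
__enqueue_tpd() treats the TPDRQ as a producer ring: compute the slot after the current tail and, if it equals the cached head, re-read the real head from TPDRQ_B_H before concluding the queue is full. A sketch of that check from lines 2256-2309 above; the skb cleanup taken on the genuinely-full path is elided:

	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail + 1));

	if (new_tail == he_dev->tpdrq_head) {
		/* cached head says full; refresh it from the hardware register */
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long) he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
		if (new_tail == he_dev->tpdrq_head) {
			/* still full: give the tpd back to its pool (line 2287) */
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);	/* flush posted writes */
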
2316 struct he_dev *he_dev = HE_DEV(vcc->dev); in he_open() local
2335 cid = he_mkcid(he_dev, vpi, vci); in he_open()
2357 pcr_goal = he_dev->atm_dev->link_rate; in he_open()
2377 spin_lock_irqsave(&he_dev->global_lock, flags); in he_open()
2378 tsr0 = he_readl_tsr0(he_dev, cid); in he_open()
2379 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2399 if ((he_dev->total_bw + pcr_goal) in he_open()
2400 > (he_dev->atm_dev->link_rate * 9 / 10)) in he_open()
2406 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */ in he_open()
2410 if (he_dev->cs_stper[reg].inuse == 0 || in he_open()
2411 he_dev->cs_stper[reg].pcr == pcr_goal) in he_open()
2416 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2420 he_dev->total_bw += pcr_goal; in he_open()
2423 ++he_dev->cs_stper[reg].inuse; in he_open()
2424 he_dev->cs_stper[reg].pcr = pcr_goal; in he_open()
2426 clock = he_is622(he_dev) ? 66667000 : 50000000; in he_open()
2432 he_writel_mbox(he_dev, rate_to_atmf(period/2), in he_open()
2434 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2445 spin_lock_irqsave(&he_dev->global_lock, flags); in he_open()
2447 he_writel_tsr0(he_dev, tsr0, cid); in he_open()
2448 he_writel_tsr4(he_dev, tsr4 | 1, cid); in he_open()
2449 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) | in he_open()
2451 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid); in he_open()
2452 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid); in he_open()
2454 he_writel_tsr3(he_dev, 0x0, cid); in he_open()
2455 he_writel_tsr5(he_dev, 0x0, cid); in he_open()
2456 he_writel_tsr6(he_dev, 0x0, cid); in he_open()
2457 he_writel_tsr7(he_dev, 0x0, cid); in he_open()
2458 he_writel_tsr8(he_dev, 0x0, cid); in he_open()
2459 he_writel_tsr10(he_dev, 0x0, cid); in he_open()
2460 he_writel_tsr11(he_dev, 0x0, cid); in he_open()
2461 he_writel_tsr12(he_dev, 0x0, cid); in he_open()
2462 he_writel_tsr13(he_dev, 0x0, cid); in he_open()
2463 he_writel_tsr14(he_dev, 0x0, cid); in he_open()
2464 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */ in he_open()
2465 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2486 spin_lock_irqsave(&he_dev->global_lock, flags); in he_open()
2488 rsr0 = he_readl_rsr0(he_dev, cid); in he_open()
2490 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2512 he_writel_rsr4(he_dev, rsr4, cid); in he_open()
2513 he_writel_rsr1(he_dev, rsr1, cid); in he_open()
2516 he_writel_rsr0(he_dev, in he_open()
2518 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */ in he_open()
2520 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_open()
2541 struct he_dev *he_dev = HE_DEV(vcc->dev); in he_close() local
2551 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci); in he_close()
2562 spin_lock_irqsave(&he_dev->global_lock, flags); in he_close()
2563 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) { in he_close()
2571 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid); in he_close()
2572 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */ in he_close()
2573 he_writel_mbox(he_dev, cid, RXCON_CLOSE); in he_close()
2574 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_close()
2619 spin_lock_irqsave(&he_dev->global_lock, flags); in he_close()
2620 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid); in he_close()
2625 he_writel_tsr1(he_dev, in he_close()
2630 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid); in he_close()
2633 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */ in he_close()
2635 tpd = __alloc_tpd(he_dev); in he_close()
2647 __enqueue_tpd(he_dev, tpd, cid); in he_close()
2648 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_close()
2655 spin_lock_irqsave(&he_dev->global_lock, flags); in he_close()
2662 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) { in he_close()
2667 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) { in he_close()
2679 if (he_dev->cs_stper[reg].inuse == 0) in he_close()
2682 --he_dev->cs_stper[reg].inuse; in he_close()
2684 he_dev->total_bw -= he_dev->cs_stper[reg].pcr; in he_close()
2686 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_close()
2710 struct he_dev *he_dev = HE_DEV(vcc->dev); in he_send() local
2711 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci); in he_send()
2743 spin_lock_irqsave(&he_dev->global_lock, flags); in he_send()
2745 tpd = __alloc_tpd(he_dev); in he_send()
2752 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_send()
2772 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data, in he_send()
2786 __enqueue_tpd(he_dev, tpd, cid); in he_send()
2787 tpd = __alloc_tpd(he_dev); in he_send()
2794 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_send()
2801 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, in he_send()
2811 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); in he_send()
2821 __enqueue_tpd(he_dev, tpd, cid); in he_send()
2822 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_send()
2833 struct he_dev *he_dev = HE_DEV(atm_dev); in he_ioctl() local
2846 spin_lock_irqsave(&he_dev->global_lock, flags); in he_ioctl()
2849 reg.val = he_readl(he_dev, reg.addr); in he_ioctl()
2853 he_readl_rcm(he_dev, reg.addr); in he_ioctl()
2857 he_readl_tcm(he_dev, reg.addr); in he_ioctl()
2861 he_readl_mbox(he_dev, reg.addr); in he_ioctl()
2867 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_ioctl()
2890 struct he_dev *he_dev = HE_DEV(atm_dev); in he_phy_put() local
2894 spin_lock_irqsave(&he_dev->global_lock, flags); in he_phy_put()
2895 he_writel(he_dev, val, FRAMER + (addr*4)); in he_phy_put()
2896 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */ in he_phy_put()
2897 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_phy_put()
2905 struct he_dev *he_dev = HE_DEV(atm_dev); in he_phy_get() local
2908 spin_lock_irqsave(&he_dev->global_lock, flags); in he_phy_get()
2909 reg = he_readl(he_dev, FRAMER + (addr*4)); in he_phy_get()
2910 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_phy_get()
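
The PHY accessors map SUNI register offsets onto the FRAMER window at a 4-byte stride and serialize on the device's global lock. A sketch of the pair from lines 2894-2910 above; the exact signatures are assumptions matching the atmphy get/put callbacks:

	static int he_phy_put(struct atm_dev *atm_dev, unsigned char val,
			      unsigned long addr)
	{
		struct he_dev *he_dev = HE_DEV(atm_dev);
		unsigned long flags;

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel(he_dev, val, FRAMER + (addr * 4));
		(void) he_readl(he_dev, FRAMER + (addr * 4));	/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return 0;
	}

	static unsigned char he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
	{
		struct he_dev *he_dev = HE_DEV(atm_dev);
		unsigned long flags;
		unsigned reg;

		spin_lock_irqsave(&he_dev->global_lock, flags);
		reg = he_readl(he_dev, FRAMER + (addr * 4));
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return reg;
	}
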
2920 struct he_dev *he_dev = HE_DEV(dev); in he_proc_read() local
2936 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM"); in he_proc_read()
2941 spin_lock_irqsave(&he_dev->global_lock, flags); in he_proc_read()
2942 mcc += he_readl(he_dev, MCC); in he_proc_read()
2943 oec += he_readl(he_dev, OEC); in he_proc_read()
2944 dcc += he_readl(he_dev, DCC); in he_proc_read()
2945 cec += he_readl(he_dev, CEC); in he_proc_read()
2946 spin_unlock_irqrestore(&he_dev->global_lock, flags); in he_proc_read()
2954 CONFIG_IRQ_SIZE, he_dev->irq_peak); in he_proc_read()
2962 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak); in he_proc_read()
2966 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak); in he_proc_read()
2970 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S)); in he_proc_read()
2971 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T)); in he_proc_read()
2989 he_dev->cs_stper[i].pcr, in he_proc_read()
2990 he_dev->cs_stper[i].inuse); in he_proc_read()
2994 he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9); in he_proc_read()
3002 read_prom_byte(struct he_dev *he_dev, int addr) in read_prom_byte() argument
3008 val = readl(he_dev->membase + HOST_CNTL); in read_prom_byte()
3013 he_writel(he_dev, val, HOST_CNTL); in read_prom_byte()
3017 he_writel(he_dev, val | readtab[i], HOST_CNTL); in read_prom_byte()
3023 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL); in read_prom_byte()
3025 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL); in read_prom_byte()
3032 he_writel(he_dev, val, HOST_CNTL); in read_prom_byte()
3036 he_writel(he_dev, val | clocktab[j++], HOST_CNTL); in read_prom_byte()
3038 tmp_read = he_readl(he_dev, HOST_CNTL); in read_prom_byte()
3041 he_writel(he_dev, val | clocktab[j++], HOST_CNTL); in read_prom_byte()
3045 he_writel(he_dev, val | ID_CS, HOST_CNTL); in read_prom_byte()