Lines Matching refs:tp
61 struct tulip_private *tp = netdev_priv(dev); in tulip_refill_rx() local
66 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) { in tulip_refill_rx()
67 entry = tp->dirty_rx % RX_RING_SIZE; in tulip_refill_rx()
68 if (tp->rx_buffers[entry].skb == NULL) { in tulip_refill_rx()
72 skb = tp->rx_buffers[entry].skb = in tulip_refill_rx()
77 mapping = dma_map_single(&tp->pdev->dev, skb->data, in tulip_refill_rx()
79 if (dma_mapping_error(&tp->pdev->dev, mapping)) { in tulip_refill_rx()
81 tp->rx_buffers[entry].skb = NULL; in tulip_refill_rx()
85 tp->rx_buffers[entry].mapping = mapping; in tulip_refill_rx()
87 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); in tulip_refill_rx()
90 tp->rx_ring[entry].status = cpu_to_le32(DescOwned); in tulip_refill_rx()
92 if(tp->chip_id == LC82C168) { in tulip_refill_rx()
93 if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) { in tulip_refill_rx()
97 iowrite32(0x01, tp->base_addr + CSR2); in tulip_refill_rx()
107 struct tulip_private *tp = from_timer(tp, t, oom_timer); in oom_timer() local
109 napi_schedule(&tp->napi); in oom_timer()
114 struct tulip_private *tp = container_of(napi, struct tulip_private, napi); in tulip_poll() local
115 struct net_device *dev = tp->dev; in tulip_poll()
116 int entry = tp->cur_rx % RX_RING_SIZE; in tulip_poll()
132 entry, tp->rx_ring[entry].status); in tulip_poll()
135 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) { in tulip_poll()
140 iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5); in tulip_poll()
144 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { in tulip_poll()
145 s32 status = le32_to_cpu(tp->rx_ring[entry].status); in tulip_poll()
148 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) in tulip_poll()
213 dma_sync_single_for_cpu(&tp->pdev->dev, in tulip_poll()
214 tp->rx_buffers[entry].mapping, in tulip_poll()
218 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, in tulip_poll()
223 tp->rx_buffers[entry].skb->data, in tulip_poll()
226 dma_sync_single_for_device(&tp->pdev->dev, in tulip_poll()
227 tp->rx_buffers[entry].mapping, in tulip_poll()
231 char *temp = skb_put(skb = tp->rx_buffers[entry].skb, in tulip_poll()
235 if (tp->rx_buffers[entry].mapping != in tulip_poll()
236 le32_to_cpu(tp->rx_ring[entry].buffer1)) { in tulip_poll()
239 le32_to_cpu(tp->rx_ring[entry].buffer1), in tulip_poll()
240 (unsigned long long)tp->rx_buffers[entry].mapping, in tulip_poll()
245 dma_unmap_single(&tp->pdev->dev, in tulip_poll()
246 tp->rx_buffers[entry].mapping, in tulip_poll()
250 tp->rx_buffers[entry].skb = NULL; in tulip_poll()
251 tp->rx_buffers[entry].mapping = 0; in tulip_poll()
264 entry = (++tp->cur_rx) % RX_RING_SIZE; in tulip_poll()
265 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) in tulip_poll()
280 } while ((ioread32(tp->base_addr + CSR5) & RxIntr)); in tulip_poll()
301 if( tp->flags & HAS_INTR_MITIGATION) { in tulip_poll()
303 if( ! tp->mit_on ) { in tulip_poll()
304 tp->mit_on = 1; in tulip_poll()
305 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11); in tulip_poll()
309 if( tp->mit_on ) { in tulip_poll()
310 tp->mit_on = 0; in tulip_poll()
311 iowrite32(0, tp->base_addr + CSR11); in tulip_poll()
321 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) in tulip_poll()
327 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); in tulip_poll()
343 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || in tulip_poll()
344 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) in tulip_poll()
347 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) in tulip_poll()
355 mod_timer(&tp->oom_timer, jiffies+1); in tulip_poll()
371 struct tulip_private *tp = netdev_priv(dev); in tulip_rx() local
372 int entry = tp->cur_rx % RX_RING_SIZE; in tulip_rx()
373 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx; in tulip_rx()
378 entry, tp->rx_ring[entry].status); in tulip_rx()
380 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { in tulip_rx()
381 s32 status = le32_to_cpu(tp->rx_ring[entry].status); in tulip_rx()
443 dma_sync_single_for_cpu(&tp->pdev->dev, in tulip_rx()
444 tp->rx_buffers[entry].mapping, in tulip_rx()
448 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, in tulip_rx()
453 tp->rx_buffers[entry].skb->data, in tulip_rx()
456 dma_sync_single_for_device(&tp->pdev->dev, in tulip_rx()
457 tp->rx_buffers[entry].mapping, in tulip_rx()
461 char *temp = skb_put(skb = tp->rx_buffers[entry].skb, in tulip_rx()
465 if (tp->rx_buffers[entry].mapping != in tulip_rx()
466 le32_to_cpu(tp->rx_ring[entry].buffer1)) { in tulip_rx()
469 le32_to_cpu(tp->rx_ring[entry].buffer1), in tulip_rx()
470 (long long)tp->rx_buffers[entry].mapping, in tulip_rx()
475 dma_unmap_single(&tp->pdev->dev, in tulip_rx()
476 tp->rx_buffers[entry].mapping, in tulip_rx()
479 tp->rx_buffers[entry].skb = NULL; in tulip_rx()
480 tp->rx_buffers[entry].mapping = 0; in tulip_rx()
490 entry = (++tp->cur_rx) % RX_RING_SIZE; in tulip_rx()
499 struct tulip_private *tp = netdev_priv(dev); in phy_interrupt() local
500 int csr12 = ioread32(tp->base_addr + CSR12) & 0xff; in phy_interrupt()
502 if (csr12 != tp->csr12_shadow) { in phy_interrupt()
504 iowrite32(csr12 | 0x02, tp->base_addr + CSR12); in phy_interrupt()
505 tp->csr12_shadow = csr12; in phy_interrupt()
507 spin_lock(&tp->lock); in phy_interrupt()
509 spin_unlock(&tp->lock); in phy_interrupt()
511 iowrite32(csr12 & ~0x02, tp->base_addr + CSR12); in phy_interrupt()
525 struct tulip_private *tp = netdev_priv(dev); in tulip_interrupt() local
526 void __iomem *ioaddr = tp->base_addr; in tulip_interrupt()
546 if (tp->flags & HAS_PHY_IRQ) in tulip_interrupt()
552 tp->nir++; in tulip_interrupt()
561 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); in tulip_interrupt()
562 napi_schedule(&tp->napi); in tulip_interrupt()
593 spin_lock(&tp->lock); in tulip_interrupt()
595 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; in tulip_interrupt()
598 int status = le32_to_cpu(tp->tx_ring[entry].status); in tulip_interrupt()
604 if (tp->tx_buffers[entry].skb == NULL) { in tulip_interrupt()
606 if (tp->tx_buffers[entry].mapping) in tulip_interrupt()
607 dma_unmap_single(&tp->pdev->dev, in tulip_interrupt()
608 tp->tx_buffers[entry].mapping, in tulip_interrupt()
609 sizeof(tp->setup_frame), in tulip_interrupt()
630 if ((status & 0x0080) && tp->full_duplex == 0) in tulip_interrupt()
634 tp->tx_buffers[entry].skb->len; in tulip_interrupt()
639 dma_unmap_single(&tp->pdev->dev, in tulip_interrupt()
640 tp->tx_buffers[entry].mapping, in tulip_interrupt()
641 tp->tx_buffers[entry].skb->len, in tulip_interrupt()
645 dev_kfree_skb_irq(tp->tx_buffers[entry].skb); in tulip_interrupt()
646 tp->tx_buffers[entry].skb = NULL; in tulip_interrupt()
647 tp->tx_buffers[entry].mapping = 0; in tulip_interrupt()
652 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { in tulip_interrupt()
655 dirty_tx, tp->cur_tx); in tulip_interrupt()
660 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) in tulip_interrupt()
663 tp->dirty_tx = dirty_tx; in tulip_interrupt()
669 tp->csr6); in tulip_interrupt()
670 tulip_restart_rxtx(tp); in tulip_interrupt()
672 spin_unlock(&tp->lock); in tulip_interrupt()
682 if ((tp->csr6 & 0xC000) != 0xC000) in tulip_interrupt()
683 tp->csr6 += 0x4000; /* Bump up the Tx threshold */ in tulip_interrupt()
685 tp->csr6 |= 0x00200000; /* Store-n-forward. */ in tulip_interrupt()
687 tulip_restart_rxtx(tp); in tulip_interrupt()
691 if (tp->flags & COMET_MAC_ADDR) { in tulip_interrupt()
692 iowrite32(tp->mc_filter[0], ioaddr + 0xAC); in tulip_interrupt()
693 iowrite32(tp->mc_filter[1], ioaddr + 0xB0); in tulip_interrupt()
699 tulip_start_rxtx(tp); in tulip_interrupt()
706 if (tp->link_change) in tulip_interrupt()
707 (tp->link_change)(dev, csr5); in tulip_interrupt()
723 tp->nir, error); in tulip_interrupt()
735 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); in tulip_interrupt()
736 tp->ttimer = 0; in tulip_interrupt()
742 csr5, tp->nir, tx, rx, oi); in tulip_interrupt()
746 if (tp->flags & HAS_INTR_MITIGATION) { in tulip_interrupt()
750 } else if (tp->chip_id == LC82C168) { in tulip_interrupt()
753 mod_timer(&tp->timer, RUN_AT(HZ/50)); in tulip_interrupt()
788 entry = tp->dirty_rx % RX_RING_SIZE; in tulip_interrupt()
789 if (tp->rx_buffers[entry].skb == NULL) { in tulip_interrupt()
793 tp->nir, tp->cur_rx, tp->ttimer, rx); in tulip_interrupt()
794 if (tp->chip_id == LC82C168) { in tulip_interrupt()
796 mod_timer(&tp->timer, RUN_AT(HZ/50)); in tulip_interrupt()
798 if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) { in tulip_interrupt()
802 tp->nir); in tulip_interrupt()
803 iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt, in tulip_interrupt()
807 tp->ttimer = 1; in tulip_interrupt()
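
Note on the tulip_refill_rx() matches above: they trace the driver's Rx-ring refill pattern - walk the descriptors between dirty_rx and cur_rx, allocate and DMA-map a buffer for any empty slot, publish the mapping in buffer1, hand the descriptor back to the chip by setting DescOwned, and on the LC82C168 kick the receiver via CSR2 if it had stopped for lack of buffers. The condensed sketch below is reconstructed from those matched fragments only; the unmatched lines (buffer allocation, error handling, PKT_BUF_SZ) and the surrounding context (struct tulip_private *tp, struct net_device *dev, the tulip.h register and descriptor definitions) are assumptions, not verbatim driver code.

	/* Sketch of the refill loop implied by the tulip_refill_rx() matches.
	 * netdev_alloc_skb(), PKT_BUF_SZ and the error paths are assumed context. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		int entry = tp->dirty_rx % RX_RING_SIZE;

		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			/* Assumed: allocate a fresh receive buffer for the empty slot. */
			skb = tp->rx_buffers[entry].skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
			if (!skb)
				break;

			mapping = dma_map_single(&tp->pdev->dev, skb->data,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
			if (dma_mapping_error(&tp->pdev->dev, mapping)) {
				/* Assumed: drop the buffer and retry on a later pass. */
				dev_kfree_skb(skb);
				tp->rx_buffers[entry].skb = NULL;
				break;
			}
			tp->rx_buffers[entry].mapping = mapping;
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
		}
		/* Hand the descriptor back to the chip. */
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}

	/* LC82C168 (PNIC): if the receive process is suspended waiting for a
	 * buffer (CSR5 Rx state 4), write the receive poll demand register. */
	if (tp->chip_id == LC82C168 &&
	    ((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4)
		iowrite32(0x01, tp->base_addr + CSR2);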