Lines Matching refs:yp
Each match below is a reference to the identifier yp, the local struct yellowfin_private * pointer that the yellowfin driver routines obtain from dev->priv. The source-file line number and, where available, the enclosing function are shown with each match.
622 struct yellowfin_private *yp = dev->priv; in yellowfin_open() local
638 outl(yp->rx_ring_dma, ioaddr + RxPtr); in yellowfin_open()
639 outl(yp->tx_ring_dma, ioaddr + TxPtr); in yellowfin_open()
660 yp->tx_threshold = 32; in yellowfin_open()
661 outl(yp->tx_threshold, ioaddr + TxThreshold); in yellowfin_open()
664 dev->if_port = yp->default_port; in yellowfin_open()
669 if (yp->drv_flags & IsGigabit) { in yellowfin_open()
671 yp->full_duplex = 1; in yellowfin_open()
676 outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg); in yellowfin_open()
692 init_timer(&yp->timer); in yellowfin_open()
693 yp->timer.expires = jiffies + 3*HZ; in yellowfin_open()
694 yp->timer.data = (unsigned long)dev; in yellowfin_open()
695 yp->timer.function = &yellowfin_timer; /* timer handler */ in yellowfin_open()
696 add_timer(&yp->timer); in yellowfin_open()
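
The matches above all come from yellowfin_open(). Read together, they show the open path programming the Rx/Tx descriptor-ring base addresses, setting the Tx threshold and duplex bits in Cnfg, and arming the pre-2.6 media timer. A minimal sketch assembled only from the matched fragments; the IRQ request, remaining register setup, and queue start are elided, and the return value handling is an assumption:

    static int yellowfin_open(struct net_device *dev)
    {
        struct yellowfin_private *yp = dev->priv;
        long ioaddr = dev->base_addr;

        /* ... IRQ request and descriptor-ring allocation elided ... */

        /* Point the DBDMA engines at the Rx and Tx descriptor rings. */
        outl(yp->rx_ring_dma, ioaddr + RxPtr);
        outl(yp->tx_ring_dma, ioaddr + TxPtr);

        yp->tx_threshold = 32;
        outl(yp->tx_threshold, ioaddr + TxThreshold);

        dev->if_port = yp->default_port;

        /* Gigabit parts are forced to full duplex here. */
        if (yp->drv_flags & IsGigabit)
            yp->full_duplex = 1;
        outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

        /* ... interrupt enable and netif_start_queue() elided ... */

        /* Pre-2.6 timer API: poll the link state every few seconds. */
        init_timer(&yp->timer);
        yp->timer.expires = jiffies + 3*HZ;
        yp->timer.data = (unsigned long)dev;
        yp->timer.function = &yellowfin_timer;   /* timer handler */
        add_timer(&yp->timer);

        return 0;                                /* assumed success path */
    }
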
704 struct yellowfin_private *yp = dev->priv; in yellowfin_timer() local
713 if (yp->mii_cnt) { in yellowfin_timer()
714 int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR); in yellowfin_timer()
715 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA); in yellowfin_timer()
716 int negotiated = lpa & yp->advertising; in yellowfin_timer()
720 dev->name, yp->phys[0], bmsr, lpa); in yellowfin_timer()
722 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated); in yellowfin_timer()
724 outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg); in yellowfin_timer()
732 yp->timer.expires = jiffies + next_tick; in yellowfin_timer()
733 add_timer(&yp->timer); in yellowfin_timer()
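
These lines form the body of yellowfin_timer(), the periodic link monitor. They show the usual MII sequence: read BMSR and the partner's LPA, mask LPA with what we advertised, derive the duplex setting, push it back into Cnfg, and re-arm the timer. A sketch assembled from the fragments above; the 60-second interval and the faster re-poll while the link is down are assumptions, not shown in the matches:

    static void yellowfin_timer(unsigned long data)
    {
        struct net_device *dev = (struct net_device *)data;
        struct yellowfin_private *yp = dev->priv;
        long ioaddr = dev->base_addr;
        int next_tick = 60*HZ;                   /* assumed default interval */

        if (yp->mii_cnt) {
            int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
            int lpa  = mdio_read(ioaddr, yp->phys[0], MII_LPA);
            int negotiated = lpa & yp->advertising;

            /* Honour a forced duplex setting, otherwise follow autonegotiation. */
            yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
            outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

            /* Re-check sooner while the link is still down (assumed policy). */
            next_tick = (bmsr & BMSR_LSTATUS) ? 60*HZ : 3*HZ;
        }

        yp->timer.expires = jiffies + next_tick;
        add_timer(&yp->timer);
    }
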
738 struct yellowfin_private *yp = dev->priv; in yellowfin_tx_timeout() local
743 dev->name, yp->cur_tx, yp->dirty_tx, in yellowfin_tx_timeout()
749 printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring); in yellowfin_tx_timeout()
751 printk(" %8.8x", yp->rx_ring[i].result_status); in yellowfin_tx_timeout()
752 printk("\n"KERN_WARNING" Tx ring %p: ", yp->tx_ring); in yellowfin_tx_timeout()
754 printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs, in yellowfin_tx_timeout()
755 yp->tx_ring[i].result_status); in yellowfin_tx_timeout()
765 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) in yellowfin_tx_timeout()
769 yp->stats.tx_errors++; in yellowfin_tx_timeout()
775 struct yellowfin_private *yp = dev->priv; in yellowfin_init_ring() local
778 yp->tx_full = 0; in yellowfin_init_ring()
779 yp->cur_rx = yp->cur_tx = 0; in yellowfin_init_ring()
780 yp->dirty_tx = 0; in yellowfin_init_ring()
782 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in yellowfin_init_ring()
785 yp->rx_ring[i].dbdma_cmd = in yellowfin_init_ring()
786 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz); in yellowfin_init_ring()
787 yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma + in yellowfin_init_ring()
792 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz); in yellowfin_init_ring()
793 yp->rx_skbuff[i] = skb; in yellowfin_init_ring()
798 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, in yellowfin_init_ring()
799 skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); in yellowfin_init_ring()
801 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_init_ring()
802 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in yellowfin_init_ring()
808 yp->tx_skbuff[i] = 0; in yellowfin_init_ring()
809 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_init_ring()
810 yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma + in yellowfin_init_ring()
814 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS); in yellowfin_init_ring()
822 yp->tx_skbuff[i] = 0; in yellowfin_init_ring()
824 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_init_ring()
825 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + in yellowfin_init_ring()
828 if (yp->flags & FullTxStatus) { in yellowfin_init_ring()
829 yp->tx_ring[j].dbdma_cmd = in yellowfin_init_ring()
830 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status)); in yellowfin_init_ring()
831 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status); in yellowfin_init_ring()
832 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + in yellowfin_init_ring()
836 yp->tx_ring[j].dbdma_cmd = in yellowfin_init_ring()
838 yp->tx_ring[j].request_cnt = 2; in yellowfin_init_ring()
840 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + in yellowfin_init_ring()
842 &(yp->tx_status[0].tx_errs) - in yellowfin_init_ring()
843 &(yp->tx_status[0])); in yellowfin_init_ring()
845 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + in yellowfin_init_ring()
849 yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS); in yellowfin_init_ring()
852 yp->tx_tail_desc = &yp->tx_status[0]; in yellowfin_init_ring()
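
The yellowfin_init_ring() matches are the densest group because yp owns both descriptor rings. Two points stand out in the fragments: each Rx descriptor's branch_addr links it to the next entry, forming a circular DBDMA chain that is parked by rewriting the last filled entry's command to CMD_STOP, and every Rx slot gets a freshly allocated skb whose data area is mapped with pci_map_single(). A sketch of the Rx half only, reconstructed from the matched lines; the wrap arithmetic in branch_addr, the skb_reserve() alignment, and the NULL handling are assumptions beyond what the fragments show, and the Tx/status-ring half is omitted:

    yp->tx_full = 0;
    yp->cur_rx = yp->cur_tx = 0;
    yp->dirty_tx = 0;
    yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

    /* Link every Rx descriptor to the next one in the DMA ring. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        yp->rx_ring[i].dbdma_cmd =
            cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
        yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
            ((i + 1) % RX_RING_SIZE) * sizeof(struct yellowfin_desc));
    }

    /* Give each slot a receive buffer and map it for the NIC. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
        yp->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;                      /* partial ring; assumed handling */
        skb->dev = dev;                 /* assumed, as in other 2.4 drivers */
        skb_reserve(skb, 2);            /* assumed IP-header alignment */
        yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
            skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
    }
    /* Park the chain at the last filled slot and remember the refill point. */
    yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
    yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
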
858 struct yellowfin_private *yp = dev->priv;
868 entry = yp->cur_tx % TX_RING_SIZE;
881 yp->tx_skbuff[entry] = NULL;
886 yp->tx_skbuff[entry] = skb;
889 yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
891 yp->tx_ring[entry].result_status = 0;
894 yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
895 yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
898 yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
899 yp->tx_ring[entry].dbdma_cmd =
902 yp->cur_tx++;
904 yp->tx_ring[entry<<1].request_cnt = len;
905 yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
910 yp->cur_tx++;
912 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
913 yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
917 yp->tx_ring[entry<<1].dbdma_cmd =
927 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
930 yp->tx_full = 1;
935 dev->name, yp->cur_tx, entry);
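
Lines 858-935 are the transmit path, presumably yellowfin_start_xmit(). The fragments show its central trick: the ring is never left without a CMD_STOP descriptor. On the non-gigabit path the *next* entry is written as CMD_STOP first, and only then is the current entry's command rewritten to start transmission, so the DMA engine can never run past valid descriptors. A heavily reduced sketch of that ordering; the CMD_TX_PKT/BRANCH_IFTRUE constants, the queue-state calls, and the elided DMA kick are assumptions, and the gigabit dual-descriptor path (the entry<<1 lines above) is omitted:

    int entry = yp->cur_tx % TX_RING_SIZE;

    yp->tx_skbuff[entry] = skb;
    yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
        skb->data, skb->len, PCI_DMA_TODEVICE));
    yp->tx_ring[entry].result_status = 0;

    if (entry >= TX_RING_SIZE - 1) {
        /* Wrap: new stop descriptor at the ring head, then arm the tail. */
        yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
            cpu_to_le32(CMD_TX_PKT | BRANCH_ALWAYS | skb->len);   /* assumed cmd */
    } else {
        /* Stop the next entry before arming this one. */
        yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->tx_ring[entry].dbdma_cmd =
            cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len);   /* assumed cmd */
    }
    yp->cur_tx++;

    /* ... DMA kick via the Tx control register elided ... */

    if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
        netif_start_queue(dev);          /* assumed queue handling */
    else
        yp->tx_full = 1;
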
945 struct yellowfin_private *yp;
957 yp = dev->priv;
959 spin_lock (&yp->lock);
977 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
978 int entry = yp->dirty_tx % TX_RING_SIZE;
981 if (yp->tx_ring[entry].result_status == 0)
983 skb = yp->tx_skbuff[entry];
984 yp->stats.tx_packets++;
985 yp->stats.tx_bytes += skb->len;
987 pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
990 yp->tx_skbuff[entry] = 0;
992 if (yp->tx_full
993 && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
995 yp->tx_full = 0;
999 if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
1000 unsigned dirty_tx = yp->dirty_tx;
1002 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
1006 u16 tx_errs = yp->tx_status[entry].tx_errs;
1014 yp->tx_status[entry].tx_cnt,
1015 yp->tx_status[entry].tx_errs,
1016 yp->tx_status[entry].total_tx_cnt,
1017 yp->tx_status[entry].paused);
1021 skb = yp->tx_skbuff[entry];
1029 yp->stats.tx_errors++;
1030 if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
1031 if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
1032 if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
1033 if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
1040 yp->stats.tx_bytes += skb->len;
1041 yp->stats.collisions += tx_errs & 15;
1042 yp->stats.tx_packets++;
1045 pci_unmap_single(yp->pci_dev,
1046 yp->tx_ring[entry<<1].addr, skb->len,
1049 yp->tx_skbuff[entry] = 0;
1051 yp->tx_status[entry].tx_errs = 0;
1055 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1057 dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
1062 if (yp->tx_full
1063 && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1065 yp->tx_full = 0;
1069 yp->dirty_tx = dirty_tx;
1070 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1090 spin_unlock (&yp->lock);
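
The block from line 945 onward is the interrupt handler, presumably yellowfin_interrupt(). Under yp->lock it reaps completed transmits in two styles: a simple loop keyed on result_status for chips without the full Tx status ring, and a second loop (entered on IntrTxDone or a pending tx_errs word) that decodes per-packet errors into the statistics counters. A sketch of the simple reap loop only, reconstructed from the matched lines; the dev_kfree_skb_irq() call and the queue wake-up are assumptions consistent with 2.4 practice:

    spin_lock(&yp->lock);

    for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
        int entry = yp->dirty_tx % TX_RING_SIZE;
        struct sk_buff *skb;

        if (yp->tx_ring[entry].result_status == 0)
            break;                       /* still owned by the NIC */
        skb = yp->tx_skbuff[entry];
        yp->stats.tx_packets++;
        yp->stats.tx_bytes += skb->len;
        pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
            skb->len, PCI_DMA_TODEVICE);
        dev_kfree_skb_irq(skb);          /* assumed free call */
        yp->tx_skbuff[entry] = 0;
    }
    if (yp->tx_full
        && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
        /* The ring has drained enough: let the stack transmit again. */
        yp->tx_full = 0;
        netif_wake_queue(dev);           /* assumed wake-up */
    }

    spin_unlock(&yp->lock);
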
1098 struct yellowfin_private *yp = dev->priv;
1099 int entry = yp->cur_rx % RX_RING_SIZE;
1100 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1104 entry, yp->rx_ring[entry].result_status);
1106 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1107 yp->rx_ring[entry].result_status);
1112 struct yellowfin_desc *desc = &yp->rx_ring[entry];
1113 struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1121 pci_dma_sync_single(yp->pci_dev, desc->addr,
1122 yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1136 yp->stats.rx_length_errors++;
1137 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
1142 yp->stats.rx_errors++;
1143 if (frame_status & 0x0060) yp->stats.rx_length_errors++;
1144 if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
1145 if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
1146 if (frame_status < 0) yp->stats.rx_dropped++;
1147 } else if ( !(yp->drv_flags & IsGigabit) &&
1151 yp->stats.rx_errors++;
1152 if (status1 & 0xC0) yp->stats.rx_length_errors++;
1153 if (status2 & 0x03) yp->stats.rx_frame_errors++;
1154 if (status2 & 0x04) yp->stats.rx_crc_errors++;
1155 if (status2 & 0x80) yp->stats.rx_dropped++;
1157 } else if ((yp->flags & HasMACAddrBug) &&
1158 memcmp(le32_to_cpu(yp->rx_ring_dma +
1161 memcmp(le32_to_cpu(yp->rx_ring_dma +
1173 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1186 pci_unmap_single(yp->pci_dev,
1187 yp->rx_ring[entry].addr,
1188 yp->rx_buf_sz,
1190 yp->rx_skbuff[entry] = NULL;
1208 yp->stats.rx_packets++;
1209 yp->stats.rx_bytes += pkt_len;
1211 entry = (++yp->cur_rx) % RX_RING_SIZE;
1215 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1216 entry = yp->dirty_rx % RX_RING_SIZE;
1217 if (yp->rx_skbuff[entry] == NULL) {
1218 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
1221 yp->rx_skbuff[entry] = skb;
1224 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1225 skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1227 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1228 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
1230 yp->rx_ring[entry - 1].dbdma_cmd =
1231 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1233 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1235 | yp->rx_buf_sz);
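
Lines 1098-1235 are the receive routine, presumably yellowfin_rx(). Two things show up in the fragments: the completed-frame loop syncs the DMA buffer before inspecting it and charges errors to different counters depending on whether the chip is the gigabit part, and the refill loop at the end re-arms the ring by first parking the fresh descriptor with CMD_STOP and only then re-enabling the descriptor before it. A sketch of the refill loop only, assembled from the matched lines; the skb_reserve() alignment and NULL handling are assumptions:

    /* Refill the Rx ring: allocate and map a buffer for each consumed slot. */
    for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
        entry = yp->dirty_rx % RX_RING_SIZE;
        if (yp->rx_skbuff[entry] == NULL) {
            struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
            if (skb == NULL)
                break;                   /* try again later (assumed) */
            yp->rx_skbuff[entry] = skb;
            skb->dev = dev;              /* assumed */
            skb_reserve(skb, 2);         /* assumed IP-header alignment */
            yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
        }
        /* New tail: park it and clear its completion status ... */
        yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->rx_ring[entry].result_status = 0;   /* clear complete bit */
        /* ... then un-park the descriptor in front of it. */
        if (entry != 0)
            yp->rx_ring[entry - 1].dbdma_cmd =
                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
        else
            yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
                    | yp->rx_buf_sz);
    }
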
1243 struct yellowfin_private *yp = dev->priv;
1249 yp->stats.tx_errors++;
1251 yp->stats.rx_errors++;
1257 struct yellowfin_private *yp = dev->priv;
1268 dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
1278 del_timer(&yp->timer);
1282 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", yp->tx_ring_dma);
1285 inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1286 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1287 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1288 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1291 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1292 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1294 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", yp->rx_ring_dma);
1297 inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1298 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1299 yp->rx_ring[i].result_status);
1301 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1305 get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1317 yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1318 yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
1319 if (yp->rx_skbuff[i]) {
1320 dev_kfree_skb(yp->rx_skbuff[i]);
1322 yp->rx_skbuff[i] = 0;
1325 if (yp->tx_skbuff[i])
1326 dev_kfree_skb(yp->tx_skbuff[i]);
1327 yp->tx_skbuff[i] = 0;
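
The close path from line 1257 onward, presumably yellowfin_close(), tears things down in roughly the reverse order of open: stop the media timer, optionally dump both rings for debugging, then walk the rings stopping each descriptor, poisoning the Rx buffer addresses, and freeing any skbs still attached. A condensed sketch of the ring teardown, built from the matched fragments; the surrounding register writes and debug dumps are left out:

    del_timer(&yp->timer);

    /* Free every Rx buffer; poison addr so stray DMA is easy to spot. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->rx_ring[i].addr = 0xBADF00D0;        /* an invalid address */
        if (yp->rx_skbuff[i])
            dev_kfree_skb(yp->rx_skbuff[i]);
        yp->rx_skbuff[i] = 0;
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        if (yp->tx_skbuff[i])
            dev_kfree_skb(yp->tx_skbuff[i]);
        yp->tx_skbuff[i] = 0;
    }
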
1342 struct yellowfin_private *yp = dev->priv;
1343 return &yp->stats;
1350 struct yellowfin_private *yp = dev->priv;
1372 if (yp->drv_flags & HasMulticastBug) {