Lines matching refs: cp. References to the identifier "cp" in the RTL-8139C+ ethernet driver (drivers/net/8139cp.c); each hit shows the source line number, the matched text, the enclosing function, and whether cp is a local variable or an argument there.
374 #define cpr8(reg) readb(cp->regs + (reg))
375 #define cpr16(reg) readw(cp->regs + (reg))
376 #define cpr32(reg) readl(cp->regs + (reg))
377 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
378 #define cpw16(reg,val) writew((val), cp->regs + (reg))
379 #define cpw32(reg,val) writel((val), cp->regs + (reg))
381 writeb((val), cp->regs + (reg)); \
382 readb(cp->regs + (reg)); \
385 writew((val), cp->regs + (reg)); \
386 readw(cp->regs + (reg)); \
389 writel((val), cp->regs + (reg)); \
390 readl(cp->regs + (reg)); \
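The bodies matched at 381-390 pair each MMIO write with a read-back of the same register: PCI MMIO writes can be posted, and reading the register back forces the write out to the chip before the macro returns. The macro names themselves are not among the matched lines; a minimal reconstruction, assuming the conventional cpw8_f/cpw16_f/cpw32_f ("write and flush") naming:

/* Write-and-flush variants: the read-back flushes the posted PCI write.
 * Macro names assumed; the bodies follow lines 381-390 above. */
#define cpw8_f(reg,val)  do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)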
395 static void cp_tx (struct cp_private *cp);
396 static void cp_clean_rings (struct cp_private *cp);
430 struct cp_private *cp = netdev_priv(dev); in cp_vlan_rx_register() local
433 spin_lock_irqsave(&cp->lock, flags); in cp_vlan_rx_register()
434 cp->vlgrp = grp; in cp_vlan_rx_register()
435 cp->cpcmd |= RxVlanOn; in cp_vlan_rx_register()
436 cpw16(CpCmd, cp->cpcmd); in cp_vlan_rx_register()
437 spin_unlock_irqrestore(&cp->lock, flags); in cp_vlan_rx_register()
442 struct cp_private *cp = netdev_priv(dev); in cp_vlan_rx_kill_vid() local
445 spin_lock_irqsave(&cp->lock, flags); in cp_vlan_rx_kill_vid()
446 cp->cpcmd &= ~RxVlanOn; in cp_vlan_rx_kill_vid()
447 cpw16(CpCmd, cp->cpcmd); in cp_vlan_rx_kill_vid()
448 if (cp->vlgrp) in cp_vlan_rx_kill_vid()
449 cp->vlgrp->vlan_devices[vid] = NULL; in cp_vlan_rx_kill_vid()
450 spin_unlock_irqrestore(&cp->lock, flags); in cp_vlan_rx_kill_vid()
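Both VLAN hooks above follow the same pattern: take cp->lock with interrupts disabled, toggle the RxVlanOn bit in the cached cp->cpcmd, and write the cached value to CpCmd so the shadow copy and the hardware never diverge. An illustrative helper showing just that pattern (cp_update_cpcmd is a made-up name, not a function in the driver):

/* Illustrative only: update a bit in the cached command word and push it
 * to the chip atomically with respect to the interrupt handler. The cpw16
 * macro from line 378 expects a variable named cp in scope. */
static void cp_update_cpcmd(struct cp_private *cp, u16 set, u16 clear)
{
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->cpcmd = (cp->cpcmd & ~clear) | set;
	cpw16(CpCmd, cp->cpcmd);	/* hardware now matches the shadow copy */
	spin_unlock_irqrestore(&cp->lock, flags);
}

/* cp_vlan_rx_register() amounts to cp_update_cpcmd(cp, RxVlanOn, 0);
 * cp_vlan_rx_kill_vid() to cp_update_cpcmd(cp, 0, RxVlanOn). */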
454 static inline void cp_set_rxbufsize (struct cp_private *cp) in cp_set_rxbufsize() argument
456 unsigned int mtu = cp->dev->mtu; in cp_set_rxbufsize()
460 cp->rx_buf_sz = mtu + ETH_HLEN + 8; in cp_set_rxbufsize()
462 cp->rx_buf_sz = PKT_BUF_SZ; in cp_set_rxbufsize()
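Lines 456-462 are the two halves of cp_set_rxbufsize(): the receive buffer is sized from the device MTU. The test between them does not mention cp and so is not listed; a plausible reconstruction (the ETH_DATA_LEN comparison is inferred from context):

static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + optional VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;	/* standard-sized frames */
}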
465 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, in cp_rx_skb() argument
468 skb->protocol = eth_type_trans (skb, cp->dev); in cp_rx_skb()
470 cp->net_stats.rx_packets++; in cp_rx_skb()
471 cp->net_stats.rx_bytes += skb->len; in cp_rx_skb()
472 cp->dev->last_rx = jiffies; in cp_rx_skb()
475 if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) { in cp_rx_skb()
476 vlan_hwaccel_receive_skb(skb, cp->vlgrp, in cp_rx_skb()
483 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail, in cp_rx_err_acct() argument
486 if (netif_msg_rx_err (cp)) in cp_rx_err_acct()
489 cp->dev->name, rx_tail, status, len); in cp_rx_err_acct()
490 cp->net_stats.rx_errors++; in cp_rx_err_acct()
492 cp->net_stats.rx_frame_errors++; in cp_rx_err_acct()
494 cp->net_stats.rx_crc_errors++; in cp_rx_err_acct()
496 cp->net_stats.rx_length_errors++; in cp_rx_err_acct()
498 cp->net_stats.rx_length_errors++; in cp_rx_err_acct()
500 cp->net_stats.rx_fifo_errors++; in cp_rx_err_acct()
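The matched lines at 486-500 show cp_rx_err_acct() bumping one error counter per RX status bit. The conditions themselves are not among the matched lines; a reconstruction assuming the conventional RxErrFrame/RxErrCRC/RxErrRunt/RxErrLong/RxErrFIFO bit names and an illustrative printk format:

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (cp))
		printk (KERN_DEBUG "%s: rx err, slot %d status 0x%x len %d\n",
			cp->dev->name, rx_tail, status, len);
	cp->net_stats.rx_errors++;
	if (status & RxErrFrame)
		cp->net_stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->net_stats.rx_crc_errors++;
	if (status & RxErrRunt)
		cp->net_stats.rx_length_errors++;	/* frame too short */
	if (status & RxErrLong)
		cp->net_stats.rx_length_errors++;	/* frame too long */
	if (status & RxErrFIFO)
		cp->net_stats.rx_fifo_errors++;
}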
518 struct cp_private *cp = netdev_priv(dev); in cp_rx_poll() local
519 unsigned rx_tail = cp->rx_tail; in cp_rx_poll()
534 skb = cp->rx_skb[rx_tail].skb; in cp_rx_poll()
538 desc = &cp->rx_ring[rx_tail]; in cp_rx_poll()
544 mapping = cp->rx_skb[rx_tail].mapping; in cp_rx_poll()
552 cp_rx_err_acct(cp, rx_tail, status, len); in cp_rx_poll()
553 cp->net_stats.rx_dropped++; in cp_rx_poll()
554 cp->cp_stats.rx_frags++; in cp_rx_poll()
559 cp_rx_err_acct(cp, rx_tail, status, len); in cp_rx_poll()
563 if (netif_msg_rx_status(cp)) in cp_rx_poll()
565 cp->dev->name, rx_tail, status, len); in cp_rx_poll()
567 buflen = cp->rx_buf_sz + RX_OFFSET; in cp_rx_poll()
570 cp->net_stats.rx_dropped++; in cp_rx_poll()
575 new_skb->dev = cp->dev; in cp_rx_poll()
577 pci_unmap_single(cp->pdev, mapping, in cp_rx_poll()
589 cp->rx_skb[rx_tail].mapping = in cp_rx_poll()
590 pci_map_single(cp->pdev, new_skb->tail, in cp_rx_poll()
592 cp->rx_skb[rx_tail].skb = new_skb; in cp_rx_poll()
594 cp_rx_skb(cp, skb, desc); in cp_rx_poll()
598 cp->rx_ring[rx_tail].opts2 = 0; in cp_rx_poll()
599 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); in cp_rx_poll()
602 cp->rx_buf_sz); in cp_rx_poll()
604 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); in cp_rx_poll()
611 cp->rx_tail = rx_tail; in cp_rx_poll()
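The cp_rx_poll() hits above show the driver's replace-and-refill receive scheme: the DMA-mapped skb is handed straight up the stack and a freshly allocated one takes its place, so the ring slot is never left empty. A condensed fragment of one iteration, using the period pci_* DMA API seen in the listing; ownership checking, error handling and the poll budget are omitted, and NEXT_RX plus the 0x1fff length mask are assumptions:

/* One RX ring slot: hand the finished buffer up, install a replacement. */
struct cp_desc *desc = &cp->rx_ring[rx_tail];
struct sk_buff *skb = cp->rx_skb[rx_tail].skb;
dma_addr_t mapping = cp->rx_skb[rx_tail].mapping;
unsigned buflen = cp->rx_buf_sz + RX_OFFSET;
unsigned len = (le32_to_cpu(desc->opts1) & 0x1fff) - 4;  /* strip trailing CRC; mask assumed */
struct sk_buff *new_skb;

new_skb = dev_alloc_skb(buflen);
if (!new_skb) {
	cp->net_stats.rx_dropped++;	/* no replacement: recycle the old buffer, lose the frame */
} else {
	new_skb->dev = cp->dev;
	skb_reserve(new_skb, RX_OFFSET);

	/* Release the old mapping and pass the filled skb up the stack. */
	pci_unmap_single(cp->pdev, mapping, buflen, PCI_DMA_FROMDEVICE);
	skb_put(skb, len);
	cp_rx_skb(cp, skb, desc);

	/* Map the replacement buffer into the same ring slot. */
	mapping = pci_map_single(cp->pdev, new_skb->tail,
				 buflen, PCI_DMA_FROMDEVICE);
	cp->rx_skb[rx_tail].skb = new_skb;
	cp->rx_skb[rx_tail].mapping = mapping;
}

/* Re-arm the descriptor: give it back to the NIC, keeping RingEnd on the
 * last slot so the chip wraps to the start of the ring. */
cp->rx_ring[rx_tail].opts2 = 0;
cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
if (rx_tail == (CP_RX_RING_SIZE - 1))
	desc->opts1 = cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
else
	desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
rx_tail = NEXT_RX(rx_tail);	/* ring-wrapping increment (name assumed) */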
638 struct cp_private *cp; in cp_interrupt() local
643 cp = netdev_priv(dev); in cp_interrupt()
649 if (netif_msg_intr(cp)) in cp_interrupt()
655 spin_lock(&cp->lock); in cp_interrupt()
660 spin_unlock(&cp->lock); in cp_interrupt()
671 cp_tx(cp); in cp_interrupt()
673 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE); in cp_interrupt()
675 spin_unlock(&cp->lock); in cp_interrupt()
680 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status); in cp_interrupt()
681 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status); in cp_interrupt()
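Lines 680-681 show the PCI error path of cp_interrupt(): the error flags in PCI_STATUS are write-one-to-clear, so reading the word and writing the same value back acknowledges exactly the bits that were set. A short fragment of that idiom (the message text is illustrative):

u16 pci_status;

/* PCI_STATUS error flags are RW1C: writing back the value just read
 * clears only the bits that were set. */
pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
printk(KERN_ERR "%s: PCI bus error, status %04x\n",
       cp->dev->name, pci_status);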
691 static void cp_tx (struct cp_private *cp) in cp_tx() argument
693 unsigned tx_head = cp->tx_head; in cp_tx()
694 unsigned tx_tail = cp->tx_tail; in cp_tx()
701 status = le32_to_cpu(cp->tx_ring[tx_tail].opts1); in cp_tx()
705 skb = cp->tx_skb[tx_tail].skb; in cp_tx()
709 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping, in cp_tx()
714 if (netif_msg_tx_err(cp)) in cp_tx()
716 cp->dev->name, status); in cp_tx()
717 cp->net_stats.tx_errors++; in cp_tx()
719 cp->net_stats.tx_window_errors++; in cp_tx()
721 cp->net_stats.tx_aborted_errors++; in cp_tx()
723 cp->net_stats.tx_carrier_errors++; in cp_tx()
725 cp->net_stats.tx_fifo_errors++; in cp_tx()
727 cp->net_stats.collisions += in cp_tx()
729 cp->net_stats.tx_packets++; in cp_tx()
730 cp->net_stats.tx_bytes += skb->len; in cp_tx()
731 if (netif_msg_tx_done(cp)) in cp_tx()
732 printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail); in cp_tx()
737 cp->tx_skb[tx_tail].skb = NULL; in cp_tx()
742 cp->tx_tail = tx_tail; in cp_tx()
744 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) in cp_tx()
745 netif_wake_queue(cp->dev); in cp_tx()
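cp_tx() walks tx_tail toward tx_head, reclaiming every descriptor the NIC has released (DescOwn cleared), unmapping the buffer, counting the packet, and finally waking the queue once enough slots are free. A trimmed version of that loop; DescOwn and TX_BUFFS_AVAIL appear in the listing, while LastFrag and NEXT_TX are assumed names, and the error accounting of lines 714-727 is omitted:

unsigned tx_tail = cp->tx_tail;

while (tx_tail != cp->tx_head) {
	struct sk_buff *skb;
	u32 status;

	rmb();				/* order this read against earlier descriptor reads */
	status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
	if (status & DescOwn)		/* NIC still owns this slot: stop reclaiming */
		break;

	skb = cp->tx_skb[tx_tail].skb;
	pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
			 skb->len, PCI_DMA_TODEVICE);

	if (status & LastFrag) {	/* stats and the skb are handled once per packet */
		cp->net_stats.tx_packets++;
		cp->net_stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);
	}

	cp->tx_skb[tx_tail].skb = NULL;
	tx_tail = NEXT_TX(tx_tail);	/* ring-wrapping increment (name assumed) */
}

cp->tx_tail = tx_tail;
if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
	netif_wake_queue(cp->dev);	/* enough room again: let the stack transmit */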
750 struct cp_private *cp = netdev_priv(dev); in cp_start_xmit() local
757 spin_lock_irq(&cp->lock); in cp_start_xmit()
760 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) { in cp_start_xmit()
762 spin_unlock_irq(&cp->lock); in cp_start_xmit()
769 if (cp->vlgrp && vlan_tx_tag_present(skb)) in cp_start_xmit()
773 entry = cp->tx_head; in cp_start_xmit()
776 struct cp_desc *txd = &cp->tx_ring[entry]; in cp_start_xmit()
781 mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE); in cp_start_xmit()
803 cp->tx_skb[entry].skb = skb; in cp_start_xmit()
804 cp->tx_skb[entry].mapping = mapping; in cp_start_xmit()
805 cp->tx_skb[entry].frag = 0; in cp_start_xmit()
819 first_mapping = pci_map_single(cp->pdev, skb->data, in cp_start_xmit()
821 cp->tx_skb[entry].skb = skb; in cp_start_xmit()
822 cp->tx_skb[entry].mapping = first_mapping; in cp_start_xmit()
823 cp->tx_skb[entry].frag = 1; in cp_start_xmit()
833 mapping = pci_map_single(cp->pdev, in cp_start_xmit()
853 txd = &cp->tx_ring[entry]; in cp_start_xmit()
861 cp->tx_skb[entry].skb = skb; in cp_start_xmit()
862 cp->tx_skb[entry].mapping = mapping; in cp_start_xmit()
863 cp->tx_skb[entry].frag = frag + 2; in cp_start_xmit()
867 txd = &cp->tx_ring[first_entry]; in cp_start_xmit()
888 cp->tx_head = entry; in cp_start_xmit()
889 if (netif_msg_tx_queued(cp)) in cp_start_xmit()
892 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) in cp_start_xmit()
895 spin_unlock_irq(&cp->lock); in cp_start_xmit()
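cp_start_xmit() takes cp->lock, refuses the frame when TX_BUFFS_AVAIL(cp) cannot hold the skb plus all its fragments (line 760), and stops the queue again after queuing if the ring is nearly full (line 892). The macro body is not among the matched lines; a self-contained user-space model of the free-slot arithmetic it would perform, keeping one slot unused so "full" and "empty" stay distinguishable (the macro form and the ring size of 64 are assumptions):

#include <assert.h>

#define CP_TX_RING_SIZE 64	/* power of two; 64 assumed here */

/* Free descriptors in a head(producer)/tail(consumer) ring. */
static unsigned tx_buffs_avail(unsigned tx_head, unsigned tx_tail)
{
	return (tx_tail <= tx_head)
		? tx_tail + (CP_TX_RING_SIZE - 1) - tx_head
		: tx_tail - tx_head - 1;
}

int main(void)
{
	assert(tx_buffs_avail(0, 0) == CP_TX_RING_SIZE - 1);	/* empty ring */
	assert(tx_buffs_avail(5, 5) == CP_TX_RING_SIZE - 1);
	assert(tx_buffs_avail(10, 4) == CP_TX_RING_SIZE - 7);	/* producer wrapped past consumer */
	assert(tx_buffs_avail(4, 5) == 0);			/* completely full */
	return 0;
}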
908 struct cp_private *cp = netdev_priv(dev); in __cp_set_rx_mode() local
942 if (cp->rx_config != tmp) { in __cp_set_rx_mode()
944 cp->rx_config = tmp; in __cp_set_rx_mode()
953 struct cp_private *cp = netdev_priv(dev); in cp_set_rx_mode() local
955 spin_lock_irqsave (&cp->lock, flags); in cp_set_rx_mode()
957 spin_unlock_irqrestore (&cp->lock, flags); in cp_set_rx_mode()
960 static void __cp_get_stats(struct cp_private *cp) in __cp_get_stats() argument
963 cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff); in __cp_get_stats()
969 struct cp_private *cp = netdev_priv(dev); in cp_get_stats() local
973 spin_lock_irqsave(&cp->lock, flags); in cp_get_stats()
975 __cp_get_stats(cp); in cp_get_stats()
976 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_stats()
978 return &cp->net_stats; in cp_get_stats()
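__cp_get_stats() runs with cp->lock held (its caller cp_get_stats() takes it at line 973) and folds the chip's RxMissed counter into net_stats; the counter is only 24 bits wide, hence the mask. The write that resets it is not among the matched lines; a reconstruction:

/* Called with cp->lock held and the device present. */
static void __cp_get_stats(struct cp_private *cp)
{
	/* RxMissed is a 24-bit hardware counter. */
	cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
	cpw32 (RxMissed, 0);	/* clear so the next read starts from zero */
}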
981 static void cp_stop_hw (struct cp_private *cp) in cp_stop_hw() argument
989 cp->rx_tail = 0; in cp_stop_hw()
990 cp->tx_head = cp->tx_tail = 0; in cp_stop_hw()
993 static void cp_reset_hw (struct cp_private *cp) in cp_reset_hw() argument
1007 printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name); in cp_reset_hw()
1010 static inline void cp_start_hw (struct cp_private *cp) in cp_start_hw() argument
1012 cpw16(CpCmd, cp->cpcmd); in cp_start_hw()
1016 static void cp_init_hw (struct cp_private *cp) in cp_init_hw() argument
1018 struct net_device *dev = cp->dev; in cp_init_hw()
1021 cp_reset_hw(cp); in cp_init_hw()
1029 cp_start_hw(cp); in cp_init_hw()
1038 cp->wol_enabled = 0; in cp_init_hw()
1045 ring_dma = cp->ring_dma; in cp_init_hw()
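At line 1045 cp_init_hw() picks up the descriptor ring base address to program into the chip. ring_dma is a dma_addr_t that may be 64-bit (the driver enables PCI DAC when possible), but the chip takes it as two 32-bit registers; the "(x >> 16) >> 16" idiom, seen again at line 1493, stays well defined even when dma_addr_t is a 32-bit type. A fragment of the two writes, assuming the RxRingAddr/TxRingAddr register names and the cpw32_f flush macro reconstructed earlier:

dma_addr_t ring_dma = cp->ring_dma;

/* Low half first, then the high half; shift in two steps of 16 rather
 * than one shift of 32 so a 32-bit dma_addr_t does not overflow the shift. */
cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;	/* TX ring follows the RX ring */
cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);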
1060 static int cp_refill_rx (struct cp_private *cp) in cp_refill_rx() argument
1067 skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET); in cp_refill_rx()
1071 skb->dev = cp->dev; in cp_refill_rx()
1074 cp->rx_skb[i].mapping = pci_map_single(cp->pdev, in cp_refill_rx()
1075 skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE); in cp_refill_rx()
1076 cp->rx_skb[i].skb = skb; in cp_refill_rx()
1077 cp->rx_skb[i].frag = 0; in cp_refill_rx()
1079 cp->rx_ring[i].opts2 = 0; in cp_refill_rx()
1080 cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping); in cp_refill_rx()
1082 cp->rx_ring[i].opts1 = in cp_refill_rx()
1083 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); in cp_refill_rx()
1085 cp->rx_ring[i].opts1 = in cp_refill_rx()
1086 cpu_to_le32(DescOwn | cp->rx_buf_sz); in cp_refill_rx()
1092 cp_clean_rings(cp); in cp_refill_rx()
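cp_refill_rx() allocates one skb per RX descriptor, DMA-maps it, and marks every descriptor DescOwn; only the final slot also carries RingEnd, which tells the chip to wrap back to the start of the ring. The loop header, the skb_reserve for RX_OFFSET, and the error return are not among the matched lines, so this reconstruction fills those gaps from context:

static int cp_refill_rx (struct cp_private *cp)
{
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
		if (!skb)
			goto err_out;

		skb->dev = cp->dev;
		skb_reserve(skb, RX_OFFSET);	/* keep the IP header aligned */

		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
			skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		cp->rx_skb[i].skb = skb;
		cp->rx_skb[i].frag = 0;

		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
	}

	return 0;

err_out:
	cp_clean_rings(cp);	/* undo the partially filled ring */
	return -ENOMEM;
}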
1096 static int cp_init_rings (struct cp_private *cp) in cp_init_rings() argument
1098 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); in cp_init_rings()
1099 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); in cp_init_rings()
1101 cp->rx_tail = 0; in cp_init_rings()
1102 cp->tx_head = cp->tx_tail = 0; in cp_init_rings()
1104 return cp_refill_rx (cp); in cp_init_rings()
1107 static int cp_alloc_rings (struct cp_private *cp) in cp_alloc_rings() argument
1111 mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma); in cp_alloc_rings()
1115 cp->rx_ring = mem; in cp_alloc_rings()
1116 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; in cp_alloc_rings()
1119 cp->nic_stats = mem; in cp_alloc_rings()
1120 cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE); in cp_alloc_rings()
1122 return cp_init_rings(cp); in cp_alloc_rings()
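cp_alloc_rings() makes a single pci_alloc_consistent() allocation and carves it up: RX descriptors at the start, TX descriptors immediately after, and the hardware statistics dump area in the last CP_STATS_SIZE bytes (which is where nic_stats_dma points). A sketch of that layout, assuming CP_RING_BYTES is the sum of the three regions:

/* One DMA-coherent block holds both descriptor rings plus the area the
 * chip fills on a DumpStats command. Layout assumed from the offsets
 * visible at lines 1115-1120. */
#define CP_RING_BYTES				\
	((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
	 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
	 CP_STATS_SIZE)

static int cp_alloc_rings (struct cp_private *cp)
{
	void *mem;

	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
	if (!mem)
		return -ENOMEM;

	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	mem += (CP_RING_BYTES - CP_STATS_SIZE);		/* stats area sits at the end */
	cp->nic_stats = mem;
	cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);

	return cp_init_rings(cp);
}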
1125 static void cp_clean_rings (struct cp_private *cp) in cp_clean_rings() argument
1129 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); in cp_clean_rings()
1130 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); in cp_clean_rings()
1133 if (cp->rx_skb[i].skb) { in cp_clean_rings()
1134 pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping, in cp_clean_rings()
1135 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); in cp_clean_rings()
1136 dev_kfree_skb(cp->rx_skb[i].skb); in cp_clean_rings()
1141 if (cp->tx_skb[i].skb) { in cp_clean_rings()
1142 struct sk_buff *skb = cp->tx_skb[i].skb; in cp_clean_rings()
1143 pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping, in cp_clean_rings()
1146 cp->net_stats.tx_dropped++; in cp_clean_rings()
1150 memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE); in cp_clean_rings()
1151 memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE); in cp_clean_rings()
1154 static void cp_free_rings (struct cp_private *cp) in cp_free_rings() argument
1156 cp_clean_rings(cp); in cp_free_rings()
1157 pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); in cp_free_rings()
1158 cp->rx_ring = NULL; in cp_free_rings()
1159 cp->tx_ring = NULL; in cp_free_rings()
1160 cp->nic_stats = NULL; in cp_free_rings()
1165 struct cp_private *cp = netdev_priv(dev); in cp_open() local
1168 if (netif_msg_ifup(cp)) in cp_open()
1171 rc = cp_alloc_rings(cp); in cp_open()
1175 cp_init_hw(cp); in cp_open()
1182 mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE); in cp_open()
1188 cp_stop_hw(cp); in cp_open()
1189 cp_free_rings(cp); in cp_open()
1195 struct cp_private *cp = netdev_priv(dev); in cp_close() local
1198 if (netif_msg_ifdown(cp)) in cp_close()
1201 spin_lock_irqsave(&cp->lock, flags); in cp_close()
1206 cp_stop_hw(cp); in cp_close()
1208 spin_unlock_irqrestore(&cp->lock, flags); in cp_close()
1213 cp_free_rings(cp); in cp_close()
1220 struct cp_private *cp = netdev_priv(dev); in cp_change_mtu() local
1231 cp_set_rxbufsize(cp); /* set new rx buf size */ in cp_change_mtu()
1235 spin_lock_irqsave(&cp->lock, flags); in cp_change_mtu()
1237 cp_stop_hw(cp); /* stop h/w and free rings */ in cp_change_mtu()
1238 cp_clean_rings(cp); in cp_change_mtu()
1241 cp_set_rxbufsize(cp); /* set new rx buf size */ in cp_change_mtu()
1243 rc = cp_init_rings(cp); /* realloc and restart h/w */ in cp_change_mtu()
1244 cp_start_hw(cp); in cp_change_mtu()
1246 spin_unlock_irqrestore(&cp->lock, flags); in cp_change_mtu()
1265 struct cp_private *cp = netdev_priv(dev); in mdio_read() local
1268 readw(cp->regs + mii_2_8139_map[location]) : 0; in mdio_read()
1275 struct cp_private *cp = netdev_priv(dev); in mdio_write() local
1286 static int netdev_set_wol (struct cp_private *cp, in netdev_set_wol() argument
1313 cp->wol_enabled = (wol->wolopts) ? 1 : 0; in netdev_set_wol()
1319 static void netdev_get_wol (struct cp_private *cp, in netdev_get_wol() argument
1328 if (!cp->wol_enabled) return; in netdev_get_wol()
1343 struct cp_private *cp = netdev_priv(dev); in cp_get_drvinfo() local
1347 strcpy (info->bus_info, pci_name(cp->pdev)); in cp_get_drvinfo()
1362 struct cp_private *cp = netdev_priv(dev); in cp_get_settings() local
1366 spin_lock_irqsave(&cp->lock, flags); in cp_get_settings()
1367 rc = mii_ethtool_gset(&cp->mii_if, cmd); in cp_get_settings()
1368 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_settings()
1375 struct cp_private *cp = netdev_priv(dev); in cp_set_settings() local
1379 spin_lock_irqsave(&cp->lock, flags); in cp_set_settings()
1380 rc = mii_ethtool_sset(&cp->mii_if, cmd); in cp_set_settings()
1381 spin_unlock_irqrestore(&cp->lock, flags); in cp_set_settings()
1388 struct cp_private *cp = netdev_priv(dev); in cp_nway_reset() local
1389 return mii_nway_restart(&cp->mii_if); in cp_nway_reset()
1394 struct cp_private *cp = netdev_priv(dev); in cp_get_msglevel() local
1395 return cp->msg_enable; in cp_get_msglevel()
1400 struct cp_private *cp = netdev_priv(dev); in cp_set_msglevel() local
1401 cp->msg_enable = value; in cp_set_msglevel()
1406 struct cp_private *cp = netdev_priv(dev); in cp_get_rx_csum() local
1412 struct cp_private *cp = netdev_priv(dev); in cp_set_rx_csum() local
1413 u16 cmd = cp->cpcmd, newcmd; in cp_set_rx_csum()
1425 spin_lock_irqsave(&cp->lock, flags); in cp_set_rx_csum()
1426 cp->cpcmd = newcmd; in cp_set_rx_csum()
1428 spin_unlock_irqrestore(&cp->lock, flags); in cp_set_rx_csum()
1437 struct cp_private *cp = netdev_priv(dev); in cp_get_regs() local
1445 spin_lock_irqsave(&cp->lock, flags); in cp_get_regs()
1446 memcpy_fromio(p, cp->regs, CP_REGS_SIZE); in cp_get_regs()
1447 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_regs()
1452 struct cp_private *cp = netdev_priv(dev); in cp_get_wol() local
1455 spin_lock_irqsave (&cp->lock, flags); in cp_get_wol()
1456 netdev_get_wol (cp, wol); in cp_get_wol()
1457 spin_unlock_irqrestore (&cp->lock, flags); in cp_get_wol()
1462 struct cp_private *cp = netdev_priv(dev); in cp_set_wol() local
1466 spin_lock_irqsave (&cp->lock, flags); in cp_set_wol()
1467 rc = netdev_set_wol (cp, wol); in cp_set_wol()
1468 spin_unlock_irqrestore (&cp->lock, flags); in cp_set_wol()
1488 struct cp_private *cp = netdev_priv(dev); in cp_get_ethtool_stats() local
1493 cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16); in cp_get_ethtool_stats()
1494 cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats); in cp_get_ethtool_stats()
1507 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok); in cp_get_ethtool_stats()
1508 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok); in cp_get_ethtool_stats()
1509 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err); in cp_get_ethtool_stats()
1510 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err); in cp_get_ethtool_stats()
1511 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo); in cp_get_ethtool_stats()
1512 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align); in cp_get_ethtool_stats()
1513 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col); in cp_get_ethtool_stats()
1514 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol); in cp_get_ethtool_stats()
1515 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys); in cp_get_ethtool_stats()
1516 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast); in cp_get_ethtool_stats()
1517 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast); in cp_get_ethtool_stats()
1518 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort); in cp_get_ethtool_stats()
1519 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun); in cp_get_ethtool_stats()
1520 tmp_stats[i++] = cp->cp_stats.rx_frags; in cp_get_ethtool_stats()
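The cp_get_ethtool_stats() hits show the two halves of the hardware dump: lines 1493-1494 point the chip at nic_stats_dma and set DumpStats, and lines 1507-1520 copy the counters out of the DMA area once the dump completes. The handshake in between is not among the matched lines; a fragment of how it would look, with the poll loop and iteration count assumed:

int i;

/* Point the chip at the stats DMA area and start the dump; the chip
 * clears DumpStats in the low address register when it has finished. */
cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16);
cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats);
cpr32(StatsAddr);			/* flush the posted writes */

for (i = 0; i < 1000; i++) {
	if ((cpr32(StatsAddr) & DumpStats) == 0)
		break;
	cpu_relax();
}
/* ...then the le64/le32 counters at lines 1507-1520 are copied into tmp_stats. */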
1550 struct cp_private *cp = netdev_priv(dev); in cp_ioctl() local
1558 spin_lock_irqsave(&cp->lock, flags); in cp_ioctl()
1559 rc = generic_mii_ioctl(&cp->mii_if, mii, cmd, NULL); in cp_ioctl()
1560 spin_unlock_irqrestore(&cp->lock, flags); in cp_ioctl()
1626 static void cp_set_d3_state (struct cp_private *cp) in cp_set_d3_state() argument
1628 pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */ in cp_set_d3_state()
1629 pci_set_power_state (cp->pdev, 3); in cp_set_d3_state()
1635 struct cp_private *cp; in cp_init_one() local
1664 cp = netdev_priv(dev); in cp_init_one()
1665 cp->pdev = pdev; in cp_init_one()
1666 cp->dev = dev; in cp_init_one()
1667 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug); in cp_init_one()
1668 spin_lock_init (&cp->lock); in cp_init_one()
1669 cp->mii_if.dev = dev; in cp_init_one()
1670 cp->mii_if.mdio_read = mdio_read; in cp_init_one()
1671 cp->mii_if.mdio_write = mdio_write; in cp_init_one()
1672 cp->mii_if.phy_id = CP_INTERNAL_PHY; in cp_init_one()
1673 cp->mii_if.phy_id_mask = 0x1f; in cp_init_one()
1674 cp->mii_if.reg_num_mask = 0x1f; in cp_init_one()
1675 cp_set_rxbufsize(cp); in cp_init_one()
1718 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | in cp_init_one()
1729 cp->regs = regs; in cp_init_one()
1731 cp_stop_hw(cp); in cp_init_one()
1786 if (cp->wol_enabled) cp_set_d3_state (cp); in cp_init_one()
1806 struct cp_private *cp = netdev_priv(dev); in cp_remove_one() local
1811 iounmap(cp->regs); in cp_remove_one()
1812 if (cp->wol_enabled) pci_set_power_state (pdev, 0); in cp_remove_one()
1824 struct cp_private *cp; in cp_suspend() local
1828 cp = netdev_priv(dev); in cp_suspend()
1835 spin_lock_irqsave (&cp->lock, flags); in cp_suspend()
1841 spin_unlock_irqrestore (&cp->lock, flags); in cp_suspend()
1843 if (cp->pdev && cp->wol_enabled) { in cp_suspend()
1844 pci_save_state (cp->pdev, cp->power_state); in cp_suspend()
1845 cp_set_d3_state (cp); in cp_suspend()
1854 struct cp_private *cp; in cp_resume() local
1857 cp = netdev_priv(dev); in cp_resume()
1861 if (cp->pdev && cp->wol_enabled) { in cp_resume()
1862 pci_set_power_state (cp->pdev, 0); in cp_resume()
1863 pci_restore_state (cp->pdev, cp->power_state); in cp_resume()
1866 cp_init_hw (cp); in cp_resume()