Lines matching references to bp (struct macb *) in the Cadence MACB/GEM Ethernet driver, macb_main.c. Each entry shows the source line number, the matching code, and the containing function; "argument" and "local" mark the lines where bp enters scope as a parameter or local variable.

57 #define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\  argument
58 * (bp)->rx_ring_size)
63 #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ argument
64 * (bp)->tx_ring_size)
67 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) argument
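These three macros size each DMA ring in bytes and fix the occupancy at which a stopped TX queue is woken again (once at least a quarter of the ring has drained; see the CIRC_CNT check in macb_tx_complete() further down this listing). A minimal standalone sketch of the same arithmetic, assuming a 16-byte descriptor and a 512-entry ring (both values are assumptions for illustration):

    #include <stdio.h>

    struct macb_like { unsigned int rx_ring_size, tx_ring_size; };

    #define DESC_SIZE 16u  /* assumed macb_dma_desc_get_size() result */

    #define RX_RING_BYTES(bp)     (DESC_SIZE * (bp)->rx_ring_size)
    #define TX_RING_BYTES(bp)     (DESC_SIZE * (bp)->tx_ring_size)
    #define TX_WAKEUP_THRESH(bp)  (3 * (bp)->tx_ring_size / 4)

    int main(void)
    {
        struct macb_like bp = { 512, 512 };

        printf("RX ring: %u bytes\n", RX_RING_BYTES(&bp));   /* 8192 */
        printf("TX ring: %u bytes\n", TX_RING_BYTES(&bp));   /* 8192 */
        printf("wake TX queue at <= %u used descriptors\n",
               TX_WAKEUP_THRESH(&bp));                       /* 384 */
        return 0;
    }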
130 static unsigned int macb_dma_desc_get_size(struct macb *bp) in macb_dma_desc_get_size() argument
135 switch (bp->hw_dma_cap) { in macb_dma_desc_get_size()
157 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) in macb_adj_dma_desc_idx() argument
160 switch (bp->hw_dma_cap) { in macb_adj_dma_desc_idx()
176 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) in macb_64b_desc() argument
184 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) in macb_tx_ring_wrap() argument
186 return index & (bp->tx_ring_size - 1); in macb_tx_ring_wrap()
192 index = macb_tx_ring_wrap(queue->bp, index); in macb_tx_desc()
193 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_tx_desc()
200 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; in macb_tx_skb()
207 offset = macb_tx_ring_wrap(queue->bp, index) * in macb_tx_dma()
208 macb_dma_desc_get_size(queue->bp); in macb_tx_dma()
213 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) in macb_rx_ring_wrap() argument
215 return index & (bp->rx_ring_size - 1); in macb_rx_ring_wrap()
220 index = macb_rx_ring_wrap(queue->bp, index); in macb_rx_desc()
221 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_rx_desc()
227 return queue->rx_buffers + queue->bp->rx_buffer_size * in macb_rx_buffer()
228 macb_rx_ring_wrap(queue->bp, index); in macb_rx_buffer()
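macb_tx_ring_wrap() and macb_rx_ring_wrap() reduce an ever-growing producer/consumer index to a ring slot with a single AND, which is only a valid modulo when the ring size is a power of two. A standalone sketch of the idiom:

    #include <assert.h>
    #include <stdio.h>

    /* index & (size - 1) == index % size, but only for size == 2^n,
     * where (size - 1) is an all-ones mask over the low bits. */
    static unsigned int ring_wrap(unsigned int ring_size, unsigned int index)
    {
        assert((ring_size & (ring_size - 1)) == 0);
        return index & (ring_size - 1);
    }

    int main(void)
    {
        printf("%u\n", ring_wrap(512, 511)); /* 511 */
        printf("%u\n", ring_wrap(512, 512)); /* wraps to 0 */
        printf("%u\n", ring_wrap(512, 515)); /* wraps to 3 */
        return 0;
    }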
232 static u32 hw_readl_native(struct macb *bp, int offset) in hw_readl_native() argument
234 return __raw_readl(bp->regs + offset); in hw_readl_native()
237 static void hw_writel_native(struct macb *bp, int offset, u32 value) in hw_writel_native() argument
239 __raw_writel(value, bp->regs + offset); in hw_writel_native()
242 static u32 hw_readl(struct macb *bp, int offset) in hw_readl() argument
244 return readl_relaxed(bp->regs + offset); in hw_readl()
247 static void hw_writel(struct macb *bp, int offset, u32 value) in hw_writel() argument
249 writel_relaxed(value, bp->regs + offset); in hw_writel()
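Two accessor pairs are defined here: hw_readl_native()/hw_writel_native() use the __raw MMIO variants (no byte swap, native bus endianness), while hw_readl()/hw_writel() use the relaxed little-endian variants (no heavy memory barrier). Which pair a given chip needs is decided once at probe time and stored in function pointers (bp->macb_reg_readl / bp->macb_reg_writel, visible near the end of this listing). A sketch of that dispatch, with register I/O faked by a plain array:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_regs[64];

    static uint32_t readl_fake(int offset)            { return fake_regs[offset / 4]; }
    static void writel_fake(int offset, uint32_t val) { fake_regs[offset / 4] = val; }

    /* One indirection per register access, as with bp->macb_reg_readl. */
    struct macb_like {
        uint32_t (*reg_readl)(int offset);
        void (*reg_writel)(int offset, uint32_t value);
    };

    int main(void)
    {
        struct macb_like bp = { readl_fake, writel_fake };

        bp.reg_writel(0x08, 0xdeadbeef);
        printf("0x%08x\n", bp.reg_readl(0x08));
        return 0;
    }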
281 static void macb_set_hwaddr(struct macb *bp) in macb_set_hwaddr() argument
286 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); in macb_set_hwaddr()
287 macb_or_gem_writel(bp, SA1B, bottom); in macb_set_hwaddr()
288 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); in macb_set_hwaddr()
289 macb_or_gem_writel(bp, SA1T, top); in macb_set_hwaddr()
292 macb_or_gem_writel(bp, SA2B, 0); in macb_set_hwaddr()
293 macb_or_gem_writel(bp, SA2T, 0); in macb_set_hwaddr()
294 macb_or_gem_writel(bp, SA3B, 0); in macb_set_hwaddr()
295 macb_or_gem_writel(bp, SA3T, 0); in macb_set_hwaddr()
296 macb_or_gem_writel(bp, SA4B, 0); in macb_set_hwaddr()
297 macb_or_gem_writel(bp, SA4T, 0); in macb_set_hwaddr()
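macb_set_hwaddr() splits the six-byte MAC address into a 32-bit bottom word (SA1B) and a 16-bit top word (SA1T), then zeroes the remaining specific-address registers SA2..SA4 so stale entries cannot keep matching. A sketch of the packing, using memcpy instead of the driver's cast-plus-cpu_to_le32 to sidestep alignment:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
        uint32_t bottom;  /* first four bytes -> SA1B */
        uint16_t top;     /* last two bytes  -> SA1T */

        memcpy(&bottom, mac, 4);
        memcpy(&top, mac + 4, 2);

        printf("SA1B = 0x%08x, SA1T = 0x%04x\n", bottom, top);
        return 0;
    }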
300 static void macb_get_hwaddr(struct macb *bp) in macb_get_hwaddr() argument
309 bottom = macb_or_gem_readl(bp, SA1B + i * 8); in macb_get_hwaddr()
310 top = macb_or_gem_readl(bp, SA1T + i * 8); in macb_get_hwaddr()
320 eth_hw_addr_set(bp->dev, addr); in macb_get_hwaddr()
325 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); in macb_get_hwaddr()
326 eth_hw_addr_random(bp->dev); in macb_get_hwaddr()
329 static int macb_mdio_wait_for_idle(struct macb *bp) in macb_mdio_wait_for_idle() argument
333 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE), in macb_mdio_wait_for_idle()
339 struct macb *bp = bus->priv; in macb_mdio_read() local
342 status = pm_runtime_resume_and_get(&bp->pdev->dev); in macb_mdio_read()
346 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
351 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_read()
358 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
362 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_read()
368 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) in macb_mdio_read()
375 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
379 status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); in macb_mdio_read()
382 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_read()
383 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_read()
391 struct macb *bp = bus->priv; in macb_mdio_write() local
394 status = pm_runtime_resume_and_get(&bp->pdev->dev); in macb_mdio_write()
398 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
403 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_write()
410 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
414 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_write()
421 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) in macb_mdio_write()
429 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
434 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_write()
435 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_write()
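Both MDIO paths bracket every bus transaction with macb_mdio_wait_for_idle(), which uses readx_poll_timeout() to re-read the network status register until the IDLE bit appears or a timeout elapses. A simplified standalone version of that polling shape (a retry budget stands in for the real timeout, and the bit position is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define IDLE_BIT (1u << 2)  /* illustrative bit position */

    /* Fake status register that becomes idle after a few reads. */
    static uint32_t read_nsr(void)
    {
        static int calls;
        return ++calls >= 3 ? IDLE_BIT : 0;
    }

    static int wait_for_idle(unsigned int max_tries)
    {
        while (max_tries--)
            if (read_nsr() & IDLE_BIT)
                return 0;   /* bus idle, safe to start a frame */
        return -1;          /* would be -ETIMEDOUT in the kernel */
    }

    int main(void)
    {
        printf("wait_for_idle: %d\n", wait_for_idle(10));
        return 0;
    }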
440 static void macb_init_buffers(struct macb *bp) in macb_init_buffers() argument
445 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_init_buffers()
448 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
454 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
466 static void macb_set_tx_clk(struct macb *bp, int speed) in macb_set_tx_clk() argument
470 if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG)) in macb_set_tx_clk()
474 if (bp->phy_interface == PHY_INTERFACE_MODE_MII) in macb_set_tx_clk()
491 rate_rounded = clk_round_rate(bp->tx_clk, rate); in macb_set_tx_clk()
501 netdev_warn(bp->dev, in macb_set_tx_clk()
505 if (clk_set_rate(bp->tx_clk, rate_rounded)) in macb_set_tx_clk()
506 netdev_err(bp->dev, "adjusting tx_clk failed.\n"); in macb_set_tx_clk()
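macb_set_tx_clk() retargets the transmit clock when the link speed changes, skipping MII (where the PHY drives the clock) and going through clk_round_rate() before clk_set_rate() so an unsupportable rate only produces a warning. The conventional GMII/RGMII rates per speed are sketched below; treating these exact values as what the driver computes is an assumption:

    #include <stdio.h>

    static long tx_clk_rate(int speed_mbps)
    {
        switch (speed_mbps) {
        case 10:   return 2500000;    /* 2.5 MHz */
        case 100:  return 25000000;   /* 25 MHz  */
        case 1000: return 125000000;  /* 125 MHz */
        default:   return -1;
        }
    }

    int main(void)
    {
        printf("%ld\n", tx_clk_rate(100)); /* 25000000 */
        return 0;
    }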
513 struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs); in macb_usx_pcs_link_up() local
516 config = gem_readl(bp, USX_CONTROL); in macb_usx_pcs_link_up()
521 gem_writel(bp, USX_CONTROL, config); in macb_usx_pcs_link_up()
527 struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs); in macb_usx_pcs_get_state() local
534 val = gem_readl(bp, USX_STATUS); in macb_usx_pcs_get_state()
536 val = gem_readl(bp, NCFGR); in macb_usx_pcs_get_state()
547 struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs); in macb_usx_pcs_config() local
549 gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) | in macb_usx_pcs_config()
591 struct macb *bp = netdev_priv(ndev); in macb_mac_config() local
596 spin_lock_irqsave(&bp->lock, flags); in macb_mac_config()
598 old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); in macb_mac_config()
599 old_ncr = ncr = macb_or_gem_readl(bp, NCR); in macb_mac_config()
601 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { in macb_mac_config()
604 } else if (macb_is_gem(bp)) { in macb_mac_config()
613 } else if (bp->caps & MACB_CAPS_MIIONRGMII && in macb_mac_config()
614 bp->phy_interface == PHY_INTERFACE_MODE_MII) { in macb_mac_config()
621 macb_or_gem_writel(bp, NCFGR, ctrl); in macb_mac_config()
624 macb_or_gem_writel(bp, NCR, ncr); in macb_mac_config()
630 if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) { in macb_mac_config()
633 old_pcsctrl = gem_readl(bp, PCSCNTRL); in macb_mac_config()
639 gem_writel(bp, PCSCNTRL, pcsctrl); in macb_mac_config()
642 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_config()
649 struct macb *bp = netdev_priv(ndev); in macb_mac_link_down() local
654 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) in macb_mac_link_down()
655 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_down()
657 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_down()
660 ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE)); in macb_mac_link_down()
661 macb_writel(bp, NCR, ctrl); in macb_mac_link_down()
673 struct macb *bp = netdev_priv(ndev); in macb_mac_link_up() local
679 spin_lock_irqsave(&bp->lock, flags); in macb_mac_link_up()
681 ctrl = macb_or_gem_readl(bp, NCFGR); in macb_mac_link_up()
691 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { in macb_mac_link_up()
693 if (macb_is_gem(bp)) { in macb_mac_link_up()
703 macb_set_tx_clk(bp, speed); in macb_mac_link_up()
708 bp->macbgem_ops.mog_init_rings(bp); in macb_mac_link_up()
709 macb_init_buffers(bp); in macb_mac_link_up()
711 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_up()
713 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_up()
716 macb_or_gem_writel(bp, NCFGR, ctrl); in macb_mac_link_up()
718 if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER) in macb_mac_link_up()
719 gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M, in macb_mac_link_up()
720 gem_readl(bp, HS_MAC_CONFIG))); in macb_mac_link_up()
722 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_link_up()
725 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); in macb_mac_link_up()
734 struct macb *bp = netdev_priv(ndev); in macb_mac_select_pcs() local
737 return &bp->phylink_usx_pcs; in macb_mac_select_pcs()
739 return &bp->phylink_sgmii_pcs; in macb_mac_select_pcs()
759 static int macb_phylink_connect(struct macb *bp) in macb_phylink_connect() argument
761 struct device_node *dn = bp->pdev->dev.of_node; in macb_phylink_connect()
762 struct net_device *dev = bp->dev; in macb_phylink_connect()
767 ret = phylink_of_phy_connect(bp->phylink, dn, 0); in macb_phylink_connect()
770 phydev = phy_find_first(bp->mii_bus); in macb_phylink_connect()
777 ret = phylink_connect_phy(bp->phylink, phydev); in macb_phylink_connect()
785 phylink_start(bp->phylink); in macb_phylink_connect()
794 struct macb *bp = netdev_priv(ndev); in macb_get_pcs_fixed_state() local
796 state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0; in macb_get_pcs_fixed_state()
802 struct macb *bp = netdev_priv(dev); in macb_mii_probe() local
804 bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops; in macb_mii_probe()
805 bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops; in macb_mii_probe()
807 bp->phylink_config.dev = &dev->dev; in macb_mii_probe()
808 bp->phylink_config.type = PHYLINK_NETDEV; in macb_mii_probe()
809 bp->phylink_config.mac_managed_pm = true; in macb_mii_probe()
811 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { in macb_mii_probe()
812 bp->phylink_config.poll_fixed_state = true; in macb_mii_probe()
813 bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state; in macb_mii_probe()
816 bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | in macb_mii_probe()
820 bp->phylink_config.supported_interfaces); in macb_mii_probe()
822 bp->phylink_config.supported_interfaces); in macb_mii_probe()
825 if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) { in macb_mii_probe()
826 bp->phylink_config.mac_capabilities |= MAC_1000FD; in macb_mii_probe()
827 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) in macb_mii_probe()
828 bp->phylink_config.mac_capabilities |= MAC_1000HD; in macb_mii_probe()
831 bp->phylink_config.supported_interfaces); in macb_mii_probe()
832 phy_interface_set_rgmii(bp->phylink_config.supported_interfaces); in macb_mii_probe()
834 if (bp->caps & MACB_CAPS_PCS) in macb_mii_probe()
836 bp->phylink_config.supported_interfaces); in macb_mii_probe()
838 if (bp->caps & MACB_CAPS_HIGH_SPEED) { in macb_mii_probe()
840 bp->phylink_config.supported_interfaces); in macb_mii_probe()
841 bp->phylink_config.mac_capabilities |= MAC_10000FD; in macb_mii_probe()
845 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, in macb_mii_probe()
846 bp->phy_interface, &macb_phylink_ops); in macb_mii_probe()
847 if (IS_ERR(bp->phylink)) { in macb_mii_probe()
849 PTR_ERR(bp->phylink)); in macb_mii_probe()
850 return PTR_ERR(bp->phylink); in macb_mii_probe()
856 static int macb_mdiobus_register(struct macb *bp) in macb_mdiobus_register() argument
858 struct device_node *child, *np = bp->pdev->dev.of_node; in macb_mdiobus_register()
865 int ret = of_mdiobus_register(bp->mii_bus, child); in macb_mdiobus_register()
872 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
886 return of_mdiobus_register(bp->mii_bus, np); in macb_mdiobus_register()
889 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
892 static int macb_mii_init(struct macb *bp) in macb_mii_init() argument
897 macb_writel(bp, NCR, MACB_BIT(MPE)); in macb_mii_init()
899 bp->mii_bus = mdiobus_alloc(); in macb_mii_init()
900 if (!bp->mii_bus) { in macb_mii_init()
905 bp->mii_bus->name = "MACB_mii_bus"; in macb_mii_init()
906 bp->mii_bus->read = &macb_mdio_read; in macb_mii_init()
907 bp->mii_bus->write = &macb_mdio_write; in macb_mii_init()
908 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in macb_mii_init()
909 bp->pdev->name, bp->pdev->id); in macb_mii_init()
910 bp->mii_bus->priv = bp; in macb_mii_init()
911 bp->mii_bus->parent = &bp->pdev->dev; in macb_mii_init()
913 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); in macb_mii_init()
915 err = macb_mdiobus_register(bp); in macb_mii_init()
919 err = macb_mii_probe(bp->dev); in macb_mii_init()
926 mdiobus_unregister(bp->mii_bus); in macb_mii_init()
928 mdiobus_free(bp->mii_bus); in macb_mii_init()
933 static void macb_update_stats(struct macb *bp) in macb_update_stats() argument
935 u32 *p = &bp->hw_stats.macb.rx_pause_frames; in macb_update_stats()
936 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; in macb_update_stats()
942 *p += bp->macb_reg_readl(bp, offset); in macb_update_stats()
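macb_update_stats() treats the hardware-stats struct as one contiguous run of u32 counters, walking a pointer from the first member (rx_pause_frames) to one past the last (tx_pause_frames) and adding one register read per slot. The same pointer-walk idiom in standalone form (this counter layout is illustrative, not the real one):

    #include <stdint.h>
    #include <stdio.h>

    struct stats_like {
        uint32_t rx_pause_frames;  /* first counter */
        uint32_t rx_frames;
        uint32_t tx_frames;
        uint32_t tx_pause_frames;  /* last counter */
    };

    static uint32_t read_counter_reg(unsigned int idx)
    {
        return 100 + idx;  /* stand-in for bp->macb_reg_readl(bp, offset) */
    }

    int main(void)
    {
        struct stats_like s = { 0, 0, 0, 0 };
        uint32_t *p = &s.rx_pause_frames;
        uint32_t *end = &s.tx_pause_frames + 1;  /* one past the end */
        unsigned int idx;

        for (idx = 0; p < end; ++p, ++idx)
            *p += read_counter_reg(idx);  /* counters accumulate */

        printf("%u .. %u\n", s.rx_pause_frames, s.tx_pause_frames);
        return 0;
    }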
945 static int macb_halt_tx(struct macb *bp) in macb_halt_tx() argument
950 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); in macb_halt_tx()
955 status = macb_readl(bp, TSR); in macb_halt_tx()
965 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget) in macb_tx_unmap() argument
969 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
972 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
983 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) in macb_set_addr() argument
988 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_set_addr()
989 desc_64 = macb_64b_desc(bp, desc); in macb_set_addr()
1001 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) in macb_get_addr() argument
1007 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_get_addr()
1008 desc_64 = macb_64b_desc(bp, desc); in macb_get_addr()
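macb_set_addr()/macb_get_addr() hide the difference between 32-bit and 64-bit descriptor layouts: with HW_DMA_CAP_64B the upper address word lives in a macb_dma_desc_64 placed right behind the basic descriptor (macb_64b_desc() computes that location). The real code writes the upper word first and issues dma_wmb() before the lower word, so hardware never observes a half-updated address. A sketch of the split, with assumed field names:

    #include <stdint.h>
    #include <stdio.h>

    struct desc    { uint32_t addr; uint32_t ctrl; };
    struct desc_64 { uint32_t addrh; uint32_t resvd; };

    static void set_addr(struct desc *d, int cap_64b, uint64_t dma)
    {
        if (cap_64b) {
            struct desc_64 *d64 = (struct desc_64 *)(d + 1);
            d64->addrh = (uint32_t)(dma >> 32);
            /* dma_wmb() would sit here in kernel code */
        }
        d->addr = (uint32_t)dma;
    }

    int main(void)
    {
        struct { struct desc d; struct desc_64 ext; } ring = { { 0, 0 }, { 0, 0 } };

        set_addr(&ring.d, 1, 0x0000000123456789ull);
        printf("addr=0x%08x addrh=0x%08x\n", ring.d.addr, ring.ext.addrh);
        return 0;
    }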
1020 struct macb *bp = queue->bp; in macb_tx_error_task() local
1027 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", in macb_tx_error_task()
1028 (unsigned int)(queue - bp->queues), in macb_tx_error_task()
1038 spin_lock_irqsave(&bp->lock, flags); in macb_tx_error_task()
1041 netif_tx_stop_all_queues(bp->dev); in macb_tx_error_task()
1047 if (macb_halt_tx(bp)) in macb_tx_error_task()
1049 netdev_err(bp->dev, "BUG: halt tx timed out\n"); in macb_tx_error_task()
1065 macb_tx_unmap(bp, tx_skb, 0); in macb_tx_error_task()
1075 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", in macb_tx_error_task()
1076 macb_tx_ring_wrap(bp, tail), in macb_tx_error_task()
1078 bp->dev->stats.tx_packets++; in macb_tx_error_task()
1080 bp->dev->stats.tx_bytes += skb->len; in macb_tx_error_task()
1089 netdev_err(bp->dev, in macb_tx_error_task()
1095 macb_tx_unmap(bp, tx_skb, 0); in macb_tx_error_task()
1100 macb_set_addr(bp, desc, 0); in macb_tx_error_task()
1109 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_tx_error_task()
1117 macb_writel(bp, TSR, macb_readl(bp, TSR)); in macb_tx_error_task()
1121 netif_tx_start_all_queues(bp->dev); in macb_tx_error_task()
1122 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_tx_error_task()
1124 spin_unlock_irqrestore(&bp->lock, flags); in macb_tx_error_task()
1160 struct macb *bp = queue->bp; in macb_tx_complete() local
1161 u16 queue_index = queue - bp->queues; in macb_tx_complete()
1202 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", in macb_tx_complete()
1203 macb_tx_ring_wrap(bp, tail), in macb_tx_complete()
1205 bp->dev->stats.tx_packets++; in macb_tx_complete()
1207 bp->dev->stats.tx_bytes += skb->len; in macb_tx_complete()
1213 macb_tx_unmap(bp, tx_skb, budget); in macb_tx_complete()
1225 if (__netif_subqueue_stopped(bp->dev, queue_index) && in macb_tx_complete()
1227 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) in macb_tx_complete()
1228 netif_wake_subqueue(bp->dev, queue_index); in macb_tx_complete()
1239 struct macb *bp = queue->bp; in gem_rx_refill() local
1243 bp->rx_ring_size) > 0) { in gem_rx_refill()
1244 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); in gem_rx_refill()
1253 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); in gem_rx_refill()
1255 netdev_err(bp->dev, in gem_rx_refill()
1261 paddr = dma_map_single(&bp->pdev->dev, skb->data, in gem_rx_refill()
1262 bp->rx_buffer_size, in gem_rx_refill()
1264 if (dma_mapping_error(&bp->pdev->dev, paddr)) { in gem_rx_refill()
1271 if (entry == bp->rx_ring_size - 1) in gem_rx_refill()
1278 macb_set_addr(bp, desc, paddr); in gem_rx_refill()
1293 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", in gem_rx_refill()
1321 struct macb *bp = queue->bp; in gem_rx() local
1333 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in gem_rx()
1340 addr = macb_get_addr(bp, desc); in gem_rx()
1354 netdev_err(bp->dev, in gem_rx()
1356 bp->dev->stats.rx_dropped++; in gem_rx()
1362 netdev_err(bp->dev, in gem_rx()
1364 bp->dev->stats.rx_dropped++; in gem_rx()
1370 len = ctrl & bp->rx_frm_len_mask; in gem_rx()
1372 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); in gem_rx()
1375 dma_unmap_single(&bp->pdev->dev, addr, in gem_rx()
1376 bp->rx_buffer_size, DMA_FROM_DEVICE); in gem_rx()
1378 skb->protocol = eth_type_trans(skb, bp->dev); in gem_rx()
1380 if (bp->dev->features & NETIF_F_RXCSUM && in gem_rx()
1381 !(bp->dev->flags & IFF_PROMISC) && in gem_rx()
1385 bp->dev->stats.rx_packets++; in gem_rx()
1387 bp->dev->stats.rx_bytes += skb->len; in gem_rx()
1390 gem_ptp_do_rxstamp(bp, skb, desc); in gem_rx()
1393 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in gem_rx()
1417 struct macb *bp = queue->bp; in macb_rx_frame() local
1420 len = desc->ctrl & bp->rx_frm_len_mask; in macb_rx_frame()
1422 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", in macb_rx_frame()
1423 macb_rx_ring_wrap(bp, first_frag), in macb_rx_frame()
1424 macb_rx_ring_wrap(bp, last_frag), len); in macb_rx_frame()
1434 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); in macb_rx_frame()
1436 bp->dev->stats.rx_dropped++; in macb_rx_frame()
1456 unsigned int frag_len = bp->rx_buffer_size; in macb_rx_frame()
1468 offset += bp->rx_buffer_size; in macb_rx_frame()
1480 skb->protocol = eth_type_trans(skb, bp->dev); in macb_rx_frame()
1482 bp->dev->stats.rx_packets++; in macb_rx_frame()
1483 bp->dev->stats.rx_bytes += skb->len; in macb_rx_frame()
1484 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in macb_rx_frame()
1493 struct macb *bp = queue->bp; in macb_init_rx_ring() local
1499 for (i = 0; i < bp->rx_ring_size; i++) { in macb_init_rx_ring()
1501 macb_set_addr(bp, desc, addr); in macb_init_rx_ring()
1503 addr += bp->rx_buffer_size; in macb_init_rx_ring()
1512 struct macb *bp = queue->bp; in macb_rx() local
1564 netdev_err(bp->dev, "RX queue corruption: reset it\n"); in macb_rx()
1566 spin_lock_irqsave(&bp->lock, flags); in macb_rx()
1568 ctrl = macb_readl(bp, NCR); in macb_rx()
1569 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); in macb_rx()
1574 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); in macb_rx()
1576 spin_unlock_irqrestore(&bp->lock, flags); in macb_rx()
1590 struct macb *bp = queue->bp; in macb_rx_pending() local
1594 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in macb_rx_pending()
1606 struct macb *bp = queue->bp; in macb_rx_poll() local
1609 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); in macb_rx_poll()
1611 netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n", in macb_rx_poll()
1612 (unsigned int)(queue - bp->queues), work_done, budget); in macb_rx_poll()
1615 queue_writel(queue, IER, bp->rx_intr_mask); in macb_rx_poll()
1628 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_rx_poll()
1629 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_rx_poll()
1631 netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n"); in macb_rx_poll()
1643 struct macb *bp = queue->bp; in macb_tx_restart() local
1651 tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); in macb_tx_restart()
1652 tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); in macb_tx_restart()
1653 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head)); in macb_tx_restart()
1658 spin_lock_irq(&bp->lock); in macb_tx_restart()
1659 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_tx_restart()
1660 spin_unlock_irq(&bp->lock); in macb_tx_restart()
1685 struct macb *bp = queue->bp; in macb_tx_poll() local
1693 netdev_vdbg(bp->dev, "poll: tx restart\n"); in macb_tx_poll()
1697 netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n", in macb_tx_poll()
1698 (unsigned int)(queue - bp->queues), work_done, budget); in macb_tx_poll()
1715 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_poll()
1717 netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n"); in macb_tx_poll()
1727 struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet); in macb_hresp_error_task() local
1728 struct net_device *dev = bp->dev; in macb_hresp_error_task()
1733 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_hresp_error_task()
1734 queue_writel(queue, IDR, bp->rx_intr_mask | in macb_hresp_error_task()
1738 ctrl = macb_readl(bp, NCR); in macb_hresp_error_task()
1740 macb_writel(bp, NCR, ctrl); in macb_hresp_error_task()
1745 bp->macbgem_ops.mog_init_rings(bp); in macb_hresp_error_task()
1748 macb_init_buffers(bp); in macb_hresp_error_task()
1751 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_hresp_error_task()
1753 bp->rx_intr_mask | in macb_hresp_error_task()
1758 macb_writel(bp, NCR, ctrl); in macb_hresp_error_task()
1767 struct macb *bp = queue->bp; in macb_wol_interrupt() local
1775 spin_lock(&bp->lock); in macb_wol_interrupt()
1779 macb_writel(bp, WOL, 0); in macb_wol_interrupt()
1780 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", in macb_wol_interrupt()
1781 (unsigned int)(queue - bp->queues), in macb_wol_interrupt()
1783 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_wol_interrupt()
1785 pm_wakeup_event(&bp->pdev->dev, 0); in macb_wol_interrupt()
1788 spin_unlock(&bp->lock); in macb_wol_interrupt()
1796 struct macb *bp = queue->bp; in gem_wol_interrupt() local
1804 spin_lock(&bp->lock); in gem_wol_interrupt()
1808 gem_writel(bp, WOL, 0); in gem_wol_interrupt()
1809 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", in gem_wol_interrupt()
1810 (unsigned int)(queue - bp->queues), in gem_wol_interrupt()
1812 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in gem_wol_interrupt()
1814 pm_wakeup_event(&bp->pdev->dev, 0); in gem_wol_interrupt()
1817 spin_unlock(&bp->lock); in gem_wol_interrupt()
1825 struct macb *bp = queue->bp; in macb_interrupt() local
1826 struct net_device *dev = bp->dev; in macb_interrupt()
1834 spin_lock(&bp->lock); in macb_interrupt()
1840 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1845 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", in macb_interrupt()
1846 (unsigned int)(queue - bp->queues), in macb_interrupt()
1849 if (status & bp->rx_intr_mask) { in macb_interrupt()
1856 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_interrupt()
1857 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1861 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); in macb_interrupt()
1869 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1879 netdev_vdbg(bp->dev, "scheduling TX softirq\n"); in macb_interrupt()
1888 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1906 ctrl = macb_readl(bp, NCR); in macb_interrupt()
1907 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); in macb_interrupt()
1909 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); in macb_interrupt()
1911 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1917 if (macb_is_gem(bp)) in macb_interrupt()
1918 bp->hw_stats.gem.rx_overruns++; in macb_interrupt()
1920 bp->hw_stats.macb.rx_overruns++; in macb_interrupt()
1922 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1927 tasklet_schedule(&bp->hresp_err_tasklet); in macb_interrupt()
1930 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1936 spin_unlock(&bp->lock); in macb_interrupt()
1947 struct macb *bp = netdev_priv(dev); in macb_poll_controller() local
1953 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
1959 static unsigned int macb_tx_map(struct macb *bp, in macb_tx_map() argument
1991 entry = macb_tx_ring_wrap(bp, tx_head); in macb_tx_map()
1994 mapping = dma_map_single(&bp->pdev->dev, in macb_tx_map()
1997 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
2011 size = min(len, bp->max_tx_length); in macb_tx_map()
2021 size = min(len, bp->max_tx_length); in macb_tx_map()
2022 entry = macb_tx_ring_wrap(bp, tx_head); in macb_tx_map()
2025 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, in macb_tx_map()
2027 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
2045 netdev_err(bp->dev, "BUG! empty skb!\n"); in macb_tx_map()
2060 entry = macb_tx_ring_wrap(bp, i); in macb_tx_map()
2082 entry = macb_tx_ring_wrap(bp, i); in macb_tx_map()
2091 if (unlikely(entry == (bp->tx_ring_size - 1))) in macb_tx_map()
2098 if ((bp->dev->features & NETIF_F_HW_CSUM) && in macb_tx_map()
2109 macb_set_addr(bp, desc, tx_skb->mapping); in macb_tx_map()
2122 netdev_err(bp->dev, "TX DMA map failed\n"); in macb_tx_map()
2127 macb_tx_unmap(bp, tx_skb, 0); in macb_tx_map()
2240 struct macb *bp = netdev_priv(dev); in macb_start_xmit() local
2241 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
2267 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); in macb_start_xmit()
2272 hdrlen = min(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2275 netdev_vdbg(bp->dev, in macb_start_xmit()
2289 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; in macb_start_xmit()
2291 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2295 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); in macb_start_xmit()
2302 bp->tx_ring_size) < desc_cnt) { in macb_start_xmit()
2304 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", in macb_start_xmit()
2311 if (!macb_tx_map(bp, queue, skb, hdrlen)) { in macb_start_xmit()
2320 spin_lock_irq(&bp->lock); in macb_start_xmit()
2321 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_start_xmit()
2322 spin_unlock_irq(&bp->lock); in macb_start_xmit()
2324 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) in macb_start_xmit()
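Before mapping anything, macb_start_xmit() counts how many descriptors the packet needs: the linear header may be split into bp->max_tx_length chunks, and every page fragment contributes its own DIV_ROUND_UP share; the packet is queued only if CIRC_SPACE() reports at least that many free slots, otherwise the subqueue is stopped. The count in standalone form (sizes and the per-descriptor limit are illustrative):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int max_tx_length = 1518;          /* assumed limit */
        unsigned int headlen = 4000;                /* linear skb part */
        unsigned int frag_sizes[2] = { 2048, 100 }; /* page fragments */
        unsigned int desc_cnt, i;

        desc_cnt = DIV_ROUND_UP(headlen, max_tx_length);      /* 3 */
        for (i = 0; i < 2; i++)
            desc_cnt += DIV_ROUND_UP(frag_sizes[i], max_tx_length);

        printf("descriptors needed: %u\n", desc_cnt);          /* 6 */
        return 0;
    }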
2333 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) in macb_init_rx_buffer_size() argument
2335 if (!macb_is_gem(bp)) { in macb_init_rx_buffer_size()
2336 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; in macb_init_rx_buffer_size()
2338 bp->rx_buffer_size = size; in macb_init_rx_buffer_size()
2340 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { in macb_init_rx_buffer_size()
2341 netdev_dbg(bp->dev, in macb_init_rx_buffer_size()
2344 bp->rx_buffer_size = in macb_init_rx_buffer_size()
2345 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); in macb_init_rx_buffer_size()
2349 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", in macb_init_rx_buffer_size()
2350 bp->dev->mtu, bp->rx_buffer_size); in macb_init_rx_buffer_size()
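On GEM the RX buffer size is rounded up to a multiple of RX_BUFFER_MULTIPLE (a DMA granularity requirement), while plain MACB always uses the fixed MACB_RX_BUFFER_SIZE. The rounding, with 64 assumed as the multiple:

    #include <stdio.h>

    #define ROUNDUP(x, m)  ((((x) + (m) - 1) / (m)) * (m))

    int main(void)
    {
        unsigned int multiple = 64;  /* assumed RX_BUFFER_MULTIPLE */

        printf("%u\n", ROUNDUP(1536u, multiple)); /* aligned: stays 1536 */
        printf("%u\n", ROUNDUP(1522u, multiple)); /* rounds up to 1536 */
        return 0;
    }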
2353 static void gem_free_rx_buffers(struct macb *bp) in gem_free_rx_buffers() argument
2362 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_free_rx_buffers()
2366 for (i = 0; i < bp->rx_ring_size; i++) { in gem_free_rx_buffers()
2373 addr = macb_get_addr(bp, desc); in gem_free_rx_buffers()
2375 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, in gem_free_rx_buffers()
2386 static void macb_free_rx_buffers(struct macb *bp) in macb_free_rx_buffers() argument
2388 struct macb_queue *queue = &bp->queues[0]; in macb_free_rx_buffers()
2391 dma_free_coherent(&bp->pdev->dev, in macb_free_rx_buffers()
2392 bp->rx_ring_size * bp->rx_buffer_size, in macb_free_rx_buffers()
2398 static void macb_free_consistent(struct macb *bp) in macb_free_consistent() argument
2404 bp->macbgem_ops.mog_free_rx_buffers(bp); in macb_free_consistent()
2406 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
2410 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_free_consistent()
2411 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2416 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_free_consistent()
2417 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2424 static int gem_alloc_rx_buffers(struct macb *bp) in gem_alloc_rx_buffers() argument
2430 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_alloc_rx_buffers()
2431 size = bp->rx_ring_size * sizeof(struct sk_buff *); in gem_alloc_rx_buffers()
2436 netdev_dbg(bp->dev, in gem_alloc_rx_buffers()
2438 bp->rx_ring_size, queue->rx_skbuff); in gem_alloc_rx_buffers()
2443 static int macb_alloc_rx_buffers(struct macb *bp) in macb_alloc_rx_buffers() argument
2445 struct macb_queue *queue = &bp->queues[0]; in macb_alloc_rx_buffers()
2448 size = bp->rx_ring_size * bp->rx_buffer_size; in macb_alloc_rx_buffers()
2449 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_rx_buffers()
2454 netdev_dbg(bp->dev, in macb_alloc_rx_buffers()
2460 static int macb_alloc_consistent(struct macb *bp) in macb_alloc_consistent() argument
2466 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
2467 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_alloc_consistent()
2468 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2473 netdev_dbg(bp->dev, in macb_alloc_consistent()
2478 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); in macb_alloc_consistent()
2483 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_alloc_consistent()
2484 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2488 netdev_dbg(bp->dev, in macb_alloc_consistent()
2492 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) in macb_alloc_consistent()
2498 macb_free_consistent(bp); in macb_alloc_consistent()
2502 static void gem_init_rings(struct macb *bp) in gem_init_rings() argument
2509 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
2510 for (i = 0; i < bp->tx_ring_size; i++) { in gem_init_rings()
2512 macb_set_addr(bp, desc, 0); in gem_init_rings()
2527 static void macb_init_rings(struct macb *bp) in macb_init_rings() argument
2532 macb_init_rx_ring(&bp->queues[0]); in macb_init_rings()
2534 for (i = 0; i < bp->tx_ring_size; i++) { in macb_init_rings()
2535 desc = macb_tx_desc(&bp->queues[0], i); in macb_init_rings()
2536 macb_set_addr(bp, desc, 0); in macb_init_rings()
2539 bp->queues[0].tx_head = 0; in macb_init_rings()
2540 bp->queues[0].tx_tail = 0; in macb_init_rings()
2544 static void macb_reset_hw(struct macb *bp) in macb_reset_hw() argument
2548 u32 ctrl = macb_readl(bp, NCR); in macb_reset_hw()
2558 macb_writel(bp, NCR, ctrl); in macb_reset_hw()
2561 macb_writel(bp, TSR, -1); in macb_reset_hw()
2562 macb_writel(bp, RSR, -1); in macb_reset_hw()
2565 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_reset_hw()
2568 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_reset_hw()
2573 static u32 gem_mdc_clk_div(struct macb *bp) in gem_mdc_clk_div() argument
2576 unsigned long pclk_hz = clk_get_rate(bp->pclk); in gem_mdc_clk_div()
2594 static u32 macb_mdc_clk_div(struct macb *bp) in macb_mdc_clk_div() argument
2599 if (macb_is_gem(bp)) in macb_mdc_clk_div()
2600 return gem_mdc_clk_div(bp); in macb_mdc_clk_div()
2602 pclk_hz = clk_get_rate(bp->pclk); in macb_mdc_clk_div()
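MDC is divided down from pclk and must stay at or below the usual IEEE 802.3 ceiling of 2.5 MHz, so both gem_mdc_clk_div() and macb_mdc_clk_div() pick a divider from the pclk rate (GEM offers larger dividers for faster clocks). A sketch of the selection; the divider set and thresholds here are assumptions, not the driver's exact table:

    #include <stdio.h>

    static unsigned int mdc_clk_div(unsigned long pclk_hz)
    {
        static const unsigned int divs[4] = { 8, 16, 32, 64 };
        unsigned int i;

        for (i = 0; i < 4; i++)
            if (pclk_hz / divs[i] <= 2500000)  /* keep MDC <= 2.5 MHz */
                return divs[i];
        return 64;  /* largest divider available here */
    }

    int main(void)
    {
        printf("pclk 20 MHz  -> /%u\n", mdc_clk_div(20000000));   /* 8 */
        printf("pclk 133 MHz -> /%u\n", mdc_clk_div(133000000));  /* 64 */
        return 0;
    }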
2619 static u32 macb_dbw(struct macb *bp) in macb_dbw() argument
2621 if (!macb_is_gem(bp)) in macb_dbw()
2624 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { in macb_dbw()
2642 static void macb_configure_dma(struct macb *bp) in macb_configure_dma() argument
2649 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; in macb_configure_dma()
2650 if (macb_is_gem(bp)) { in macb_configure_dma()
2651 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); in macb_configure_dma()
2652 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_configure_dma()
2658 if (bp->dma_burst_length) in macb_configure_dma()
2659 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); in macb_configure_dma()
2663 if (bp->native_io) in macb_configure_dma()
2668 if (bp->dev->features & NETIF_F_HW_CSUM) in macb_configure_dma()
2675 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_configure_dma()
2679 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_configure_dma()
2682 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", in macb_configure_dma()
2684 gem_writel(bp, DMACFG, dmacfg); in macb_configure_dma()
2688 static void macb_init_hw(struct macb *bp) in macb_init_hw() argument
2692 macb_reset_hw(bp); in macb_init_hw()
2693 macb_set_hwaddr(bp); in macb_init_hw()
2695 config = macb_mdc_clk_div(bp); in macb_init_hw()
2698 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2702 if (bp->dev->flags & IFF_PROMISC) in macb_init_hw()
2704 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) in macb_init_hw()
2706 if (!(bp->dev->flags & IFF_BROADCAST)) in macb_init_hw()
2708 config |= macb_dbw(bp); in macb_init_hw()
2709 macb_writel(bp, NCFGR, config); in macb_init_hw()
2710 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_init_hw()
2711 gem_writel(bp, JML, bp->jumbo_max_len); in macb_init_hw()
2712 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; in macb_init_hw()
2713 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2714 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; in macb_init_hw()
2716 macb_configure_dma(bp); in macb_init_hw()
2781 struct macb *bp = netdev_priv(dev); in macb_sethashtable() local
2791 macb_or_gem_writel(bp, HRB, mc_filter[0]); in macb_sethashtable()
2792 macb_or_gem_writel(bp, HRT, mc_filter[1]); in macb_sethashtable()
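macb_sethashtable() builds a 64-bit hash filter in two 32-bit halves, written to HRB (bits 0-31) and HRT (bits 32-63); each multicast address sets one bit, selected by a 6-bit index. The index computation sketched below (bit i of the index is the XOR of address bits i, i+6, ..., i+42, LSB-first within each byte) follows the commonly documented MACB/GEM hash and is an assumption of this sketch:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int hash_index(const uint8_t *mac)
    {
        unsigned int idx = 0, i, j;

        for (i = 0; i < 6; i++) {
            unsigned int bit = 0;
            for (j = i; j < 48; j += 6)
                bit ^= (mac[j / 8] >> (j % 8)) & 1;
            idx |= bit << i;
        }
        return idx;  /* 0..63: one bit of the HRB/HRT pair */
    }

    int main(void)
    {
        const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t mc_filter[2] = { 0, 0 };
        unsigned int idx = hash_index(mc);

        mc_filter[idx / 32] |= 1u << (idx % 32);  /* [0] -> HRB, [1] -> HRT */
        printf("index %u -> HRB=0x%08x HRT=0x%08x\n",
               idx, mc_filter[0], mc_filter[1]);
        return 0;
    }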
2799 struct macb *bp = netdev_priv(dev); in macb_set_rx_mode() local
2801 cfg = macb_readl(bp, NCFGR); in macb_set_rx_mode()
2808 if (macb_is_gem(bp)) in macb_set_rx_mode()
2815 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) in macb_set_rx_mode()
2821 macb_or_gem_writel(bp, HRB, -1); in macb_set_rx_mode()
2822 macb_or_gem_writel(bp, HRT, -1); in macb_set_rx_mode()
2830 macb_or_gem_writel(bp, HRB, 0); in macb_set_rx_mode()
2831 macb_or_gem_writel(bp, HRT, 0); in macb_set_rx_mode()
2835 macb_writel(bp, NCFGR, cfg); in macb_set_rx_mode()
2841 struct macb *bp = netdev_priv(dev); in macb_open() local
2846 netdev_dbg(bp->dev, "open\n"); in macb_open()
2848 err = pm_runtime_resume_and_get(&bp->pdev->dev); in macb_open()
2853 macb_init_rx_buffer_size(bp, bufsz); in macb_open()
2855 err = macb_alloc_consistent(bp); in macb_open()
2862 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_open()
2867 macb_init_hw(bp); in macb_open()
2869 err = phy_power_on(bp->sgmii_phy); in macb_open()
2873 err = macb_phylink_connect(bp); in macb_open()
2879 if (bp->ptp_info) in macb_open()
2880 bp->ptp_info->ptp_init(dev); in macb_open()
2885 phy_power_off(bp->sgmii_phy); in macb_open()
2888 macb_reset_hw(bp); in macb_open()
2889 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_open()
2893 macb_free_consistent(bp); in macb_open()
2895 pm_runtime_put_sync(&bp->pdev->dev); in macb_open()
2901 struct macb *bp = netdev_priv(dev); in macb_close() local
2908 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_close()
2913 phylink_stop(bp->phylink); in macb_close()
2914 phylink_disconnect_phy(bp->phylink); in macb_close()
2916 phy_power_off(bp->sgmii_phy); in macb_close()
2918 spin_lock_irqsave(&bp->lock, flags); in macb_close()
2919 macb_reset_hw(bp); in macb_close()
2921 spin_unlock_irqrestore(&bp->lock, flags); in macb_close()
2923 macb_free_consistent(bp); in macb_close()
2925 if (bp->ptp_info) in macb_close()
2926 bp->ptp_info->ptp_remove(dev); in macb_close()
2928 pm_runtime_put(&bp->pdev->dev); in macb_close()
2943 static void gem_update_stats(struct macb *bp) in gem_update_stats() argument
2949 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; in gem_update_stats()
2953 u64 val = bp->macb_reg_readl(bp, offset); in gem_update_stats()
2955 bp->ethtool_stats[i] += val; in gem_update_stats()
2960 val = bp->macb_reg_readl(bp, offset + 4); in gem_update_stats()
2961 bp->ethtool_stats[i] += ((u64)val) << 32; in gem_update_stats()
2967 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in gem_update_stats()
2969 bp->ethtool_stats[idx++] = *stat; in gem_update_stats()
2972 static struct net_device_stats *gem_get_stats(struct macb *bp) in gem_get_stats() argument
2974 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_stats()
2975 struct net_device_stats *nstat = &bp->dev->stats; in gem_get_stats()
2977 if (!netif_running(bp->dev)) in gem_get_stats()
2980 gem_update_stats(bp); in gem_get_stats()
3016 struct macb *bp; in gem_get_ethtool_stats() local
3018 bp = netdev_priv(dev); in gem_get_ethtool_stats()
3019 gem_update_stats(bp); in gem_get_ethtool_stats()
3020 memcpy(data, &bp->ethtool_stats, sizeof(u64) in gem_get_ethtool_stats()
3026 struct macb *bp = netdev_priv(dev); in gem_get_sset_count() local
3030 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; in gem_get_sset_count()
3039 struct macb *bp = netdev_priv(dev); in gem_get_ethtool_strings() local
3050 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_get_ethtool_strings()
3063 struct macb *bp = netdev_priv(dev); in macb_get_stats() local
3064 struct net_device_stats *nstat = &bp->dev->stats; in macb_get_stats()
3065 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_stats()
3067 if (macb_is_gem(bp)) in macb_get_stats()
3068 return gem_get_stats(bp); in macb_get_stats()
3071 macb_update_stats(bp); in macb_get_stats()
3116 struct macb *bp = netdev_priv(dev); in macb_get_regs() local
3120 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) in macb_get_regs()
3123 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); in macb_get_regs()
3124 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); in macb_get_regs()
3126 regs_buff[0] = macb_readl(bp, NCR); in macb_get_regs()
3127 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); in macb_get_regs()
3128 regs_buff[2] = macb_readl(bp, NSR); in macb_get_regs()
3129 regs_buff[3] = macb_readl(bp, TSR); in macb_get_regs()
3130 regs_buff[4] = macb_readl(bp, RBQP); in macb_get_regs()
3131 regs_buff[5] = macb_readl(bp, TBQP); in macb_get_regs()
3132 regs_buff[6] = macb_readl(bp, RSR); in macb_get_regs()
3133 regs_buff[7] = macb_readl(bp, IMR); in macb_get_regs()
3137 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); in macb_get_regs()
3138 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); in macb_get_regs()
3140 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_get_regs()
3141 regs_buff[12] = macb_or_gem_readl(bp, USRIO); in macb_get_regs()
3142 if (macb_is_gem(bp)) in macb_get_regs()
3143 regs_buff[13] = gem_readl(bp, DMACFG); in macb_get_regs()
3148 struct macb *bp = netdev_priv(netdev); in macb_get_wol() local
3150 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { in macb_get_wol()
3151 phylink_ethtool_get_wol(bp->phylink, wol); in macb_get_wol()
3154 if (bp->wol & MACB_WOL_ENABLED) in macb_get_wol()
3161 struct macb *bp = netdev_priv(netdev); in macb_set_wol() local
3165 ret = phylink_ethtool_set_wol(bp->phylink, wol); in macb_set_wol()
3172 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || in macb_set_wol()
3177 bp->wol |= MACB_WOL_ENABLED; in macb_set_wol()
3179 bp->wol &= ~MACB_WOL_ENABLED; in macb_set_wol()
3181 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); in macb_set_wol()
3189 struct macb *bp = netdev_priv(netdev); in macb_get_link_ksettings() local
3191 return phylink_ethtool_ksettings_get(bp->phylink, kset); in macb_get_link_ksettings()
3197 struct macb *bp = netdev_priv(netdev); in macb_set_link_ksettings() local
3199 return phylink_ethtool_ksettings_set(bp->phylink, kset); in macb_set_link_ksettings()
3207 struct macb *bp = netdev_priv(netdev); in macb_get_ringparam() local
3212 ring->rx_pending = bp->rx_ring_size; in macb_get_ringparam()
3213 ring->tx_pending = bp->tx_ring_size; in macb_get_ringparam()
3221 struct macb *bp = netdev_priv(netdev); in macb_set_ringparam() local
3236 if ((new_tx_size == bp->tx_ring_size) && in macb_set_ringparam()
3237 (new_rx_size == bp->rx_ring_size)) { in macb_set_ringparam()
3242 if (netif_running(bp->dev)) { in macb_set_ringparam()
3244 macb_close(bp->dev); in macb_set_ringparam()
3247 bp->rx_ring_size = new_rx_size; in macb_set_ringparam()
3248 bp->tx_ring_size = new_tx_size; in macb_set_ringparam()
3251 macb_open(bp->dev); in macb_set_ringparam()
3257 static unsigned int gem_get_tsu_rate(struct macb *bp) in gem_get_tsu_rate() argument
3262 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); in gem_get_tsu_rate()
3266 else if (!IS_ERR(bp->pclk)) { in gem_get_tsu_rate()
3267 tsu_clk = bp->pclk; in gem_get_tsu_rate()
3282 struct macb *bp = netdev_priv(dev); in gem_get_ts_info() local
3284 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { in gem_get_ts_info()
3304 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; in gem_get_ts_info()
3323 struct macb *bp = netdev_priv(netdev); in macb_get_ts_info() local
3325 if (bp->ptp_info) in macb_get_ts_info()
3326 return bp->ptp_info->get_ts_info(netdev, info); in macb_get_ts_info()
3331 static void gem_enable_flow_filters(struct macb *bp, bool enable) in gem_enable_flow_filters() argument
3333 struct net_device *netdev = bp->dev; in gem_enable_flow_filters()
3341 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); in gem_enable_flow_filters()
3343 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_enable_flow_filters()
3350 t2_scr = gem_readl_n(bp, SCRT2, fs->location); in gem_enable_flow_filters()
3373 gem_writel_n(bp, SCRT2, fs->location, t2_scr); in gem_enable_flow_filters()
3377 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) in gem_prog_cmp_regs() argument
3386 if (!macb_is_gem(bp)) in gem_prog_cmp_regs()
3401 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); in gem_prog_cmp_regs()
3402 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); in gem_prog_cmp_regs()
3415 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); in gem_prog_cmp_regs()
3416 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); in gem_prog_cmp_regs()
3443 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); in gem_prog_cmp_regs()
3444 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); in gem_prog_cmp_regs()
3457 gem_writel_n(bp, SCRT2, index, t2_scr); in gem_prog_cmp_regs()
3463 struct macb *bp = netdev_priv(netdev); in gem_add_flow_filter() local
3483 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3486 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_add_flow_filter()
3499 list_add_tail(&newfs->list, &bp->rx_fs_list.list); in gem_add_flow_filter()
3501 gem_prog_cmp_regs(bp, fs); in gem_add_flow_filter()
3502 bp->rx_fs_list.count++; in gem_add_flow_filter()
3504 gem_enable_flow_filters(bp, 1); in gem_add_flow_filter()
3506 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3510 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3518 struct macb *bp = netdev_priv(netdev); in gem_del_flow_filter() local
3523 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3525 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_del_flow_filter()
3537 gem_writel_n(bp, SCRT2, fs->location, 0); in gem_del_flow_filter()
3540 bp->rx_fs_list.count--; in gem_del_flow_filter()
3541 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3547 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3554 struct macb *bp = netdev_priv(netdev); in gem_get_flow_entry() local
3557 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_flow_entry()
3569 struct macb *bp = netdev_priv(netdev); in gem_get_all_flow_entries() local
3573 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_all_flow_entries()
3579 cmd->data = bp->max_tuples; in gem_get_all_flow_entries()
3588 struct macb *bp = netdev_priv(netdev); in gem_get_rxnfc() local
3593 cmd->data = bp->num_queues; in gem_get_rxnfc()
3596 cmd->rule_cnt = bp->rx_fs_list.count; in gem_get_rxnfc()
3615 struct macb *bp = netdev_priv(netdev); in gem_set_rxnfc() local
3620 if ((cmd->fs.location >= bp->max_tuples) in gem_set_rxnfc()
3621 || (cmd->fs.ring_cookie >= bp->num_queues)) { in gem_set_rxnfc()
3672 struct macb *bp = netdev_priv(dev); in macb_ioctl() local
3677 if (bp->ptp_info) { in macb_ioctl()
3680 return bp->ptp_info->set_hwtst(dev, rq, cmd); in macb_ioctl()
3682 return bp->ptp_info->get_hwtst(dev, rq); in macb_ioctl()
3686 return phylink_mii_ioctl(bp->phylink, rq, cmd); in macb_ioctl()
3689 static inline void macb_set_txcsum_feature(struct macb *bp, in macb_set_txcsum_feature() argument
3694 if (!macb_is_gem(bp)) in macb_set_txcsum_feature()
3697 val = gem_readl(bp, DMACFG); in macb_set_txcsum_feature()
3703 gem_writel(bp, DMACFG, val); in macb_set_txcsum_feature()
3706 static inline void macb_set_rxcsum_feature(struct macb *bp, in macb_set_rxcsum_feature() argument
3709 struct net_device *netdev = bp->dev; in macb_set_rxcsum_feature()
3712 if (!macb_is_gem(bp)) in macb_set_rxcsum_feature()
3715 val = gem_readl(bp, NCFGR); in macb_set_rxcsum_feature()
3721 gem_writel(bp, NCFGR, val); in macb_set_rxcsum_feature()
3724 static inline void macb_set_rxflow_feature(struct macb *bp, in macb_set_rxflow_feature() argument
3727 if (!macb_is_gem(bp)) in macb_set_rxflow_feature()
3730 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); in macb_set_rxflow_feature()
3736 struct macb *bp = netdev_priv(netdev); in macb_set_features() local
3741 macb_set_txcsum_feature(bp, features); in macb_set_features()
3745 macb_set_rxcsum_feature(bp, features); in macb_set_features()
3749 macb_set_rxflow_feature(bp, features); in macb_set_features()
3754 static void macb_restore_features(struct macb *bp) in macb_restore_features() argument
3756 struct net_device *netdev = bp->dev; in macb_restore_features()
3761 macb_set_txcsum_feature(bp, features); in macb_restore_features()
3764 macb_set_rxcsum_feature(bp, features); in macb_restore_features()
3767 list_for_each_entry(item, &bp->rx_fs_list.list, list) in macb_restore_features()
3768 gem_prog_cmp_regs(bp, &item->fs); in macb_restore_features()
3770 macb_set_rxflow_feature(bp, features); in macb_restore_features()
3793 static void macb_configure_caps(struct macb *bp, in macb_configure_caps() argument
3799 bp->caps = dt_conf->caps; in macb_configure_caps()
3801 if (hw_is_gem(bp->regs, bp->native_io)) { in macb_configure_caps()
3802 bp->caps |= MACB_CAPS_MACB_IS_GEM; in macb_configure_caps()
3804 dcfg = gem_readl(bp, DCFG1); in macb_configure_caps()
3806 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; in macb_configure_caps()
3808 bp->caps |= MACB_CAPS_PCS; in macb_configure_caps()
3809 dcfg = gem_readl(bp, DCFG12); in macb_configure_caps()
3811 bp->caps |= MACB_CAPS_HIGH_SPEED; in macb_configure_caps()
3812 dcfg = gem_readl(bp, DCFG2); in macb_configure_caps()
3814 bp->caps |= MACB_CAPS_FIFO_MODE; in macb_configure_caps()
3816 if (gem_has_ptp(bp)) { in macb_configure_caps()
3817 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) in macb_configure_caps()
3818 dev_err(&bp->pdev->dev, in macb_configure_caps()
3821 bp->hw_dma_cap |= HW_DMA_CAP_PTP; in macb_configure_caps()
3822 bp->ptp_info = &gem_ptp_info; in macb_configure_caps()
3828 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); in macb_configure_caps()
3956 struct macb *bp = netdev_priv(dev); in macb_init() local
3961 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; in macb_init()
3962 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; in macb_init()
3969 if (!(bp->queue_mask & (1 << hw_q))) in macb_init()
3972 queue = &bp->queues[q]; in macb_init()
3973 queue->bp = bp; in macb_init()
3986 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
4000 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
4029 if (macb_is_gem(bp)) { in macb_init()
4030 bp->max_tx_length = GEM_MAX_TX_LEN; in macb_init()
4031 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; in macb_init()
4032 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; in macb_init()
4033 bp->macbgem_ops.mog_init_rings = gem_init_rings; in macb_init()
4034 bp->macbgem_ops.mog_rx = gem_rx; in macb_init()
4037 bp->max_tx_length = MACB_MAX_TX_LEN; in macb_init()
4038 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; in macb_init()
4039 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; in macb_init()
4040 bp->macbgem_ops.mog_init_rings = macb_init_rings; in macb_init()
4041 bp->macbgem_ops.mog_rx = macb_rx; in macb_init()
4049 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) in macb_init()
4053 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) in macb_init()
4055 if (bp->caps & MACB_CAPS_SG_DISABLED) in macb_init()
4063 reg = gem_readl(bp, DCFG8); in macb_init()
4064 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), in macb_init()
4066 INIT_LIST_HEAD(&bp->rx_fs_list.list); in macb_init()
4067 if (bp->max_tuples > 0) { in macb_init()
4073 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); in macb_init()
4077 bp->rx_fs_list.count = 0; in macb_init()
4078 spin_lock_init(&bp->rx_fs_lock); in macb_init()
4080 bp->max_tuples = 0; in macb_init()
4083 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { in macb_init()
4085 if (phy_interface_mode_is_rgmii(bp->phy_interface)) in macb_init()
4086 val = bp->usrio->rgmii; in macb_init()
4087 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && in macb_init()
4088 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
4089 val = bp->usrio->rmii; in macb_init()
4090 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
4091 val = bp->usrio->mii; in macb_init()
4093 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) in macb_init()
4094 val |= bp->usrio->refclk; in macb_init()
4096 macb_or_gem_writel(bp, USRIO, val); in macb_init()
4100 val = macb_mdc_clk_div(bp); in macb_init()
4101 val |= macb_dbw(bp); in macb_init()
4102 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) in macb_init()
4104 macb_writel(bp, NCFGR, val); in macb_init()
4472 struct macb *bp = netdev_priv(dev); in at91ether_init() local
4475 bp->queues[0].bp = bp; in at91ether_init()
4485 macb_writel(bp, NCR, 0); in at91ether_init()
4487 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); in at91ether_init()
4602 struct macb *bp = netdev_priv(dev); in init_reset_optional() local
4605 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { in init_reset_optional()
4607 bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL); in init_reset_optional()
4609 if (IS_ERR(bp->sgmii_phy)) in init_reset_optional()
4610 return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy), in init_reset_optional()
4613 ret = phy_init(bp->sgmii_phy); in init_reset_optional()
4641 phy_exit(bp->sgmii_phy); in init_reset_optional()
4649 phy_exit(bp->sgmii_phy); in init_reset_optional()
4857 struct macb *bp; in macb_probe() local
4887 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); in macb_probe()
4897 bp = netdev_priv(dev); in macb_probe()
4898 bp->pdev = pdev; in macb_probe()
4899 bp->dev = dev; in macb_probe()
4900 bp->regs = mem; in macb_probe()
4901 bp->native_io = native_io; in macb_probe()
4903 bp->macb_reg_readl = hw_readl_native; in macb_probe()
4904 bp->macb_reg_writel = hw_writel_native; in macb_probe()
4906 bp->macb_reg_readl = hw_readl; in macb_probe()
4907 bp->macb_reg_writel = hw_writel; in macb_probe()
4909 bp->num_queues = num_queues; in macb_probe()
4910 bp->queue_mask = queue_mask; in macb_probe()
4912 bp->dma_burst_length = macb_config->dma_burst_length; in macb_probe()
4913 bp->pclk = pclk; in macb_probe()
4914 bp->hclk = hclk; in macb_probe()
4915 bp->tx_clk = tx_clk; in macb_probe()
4916 bp->rx_clk = rx_clk; in macb_probe()
4917 bp->tsu_clk = tsu_clk; in macb_probe()
4919 bp->jumbo_max_len = macb_config->jumbo_max_len; in macb_probe()
4921 bp->wol = 0; in macb_probe()
4923 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; in macb_probe()
4924 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); in macb_probe()
4926 bp->usrio = macb_config->usrio; in macb_probe()
4928 spin_lock_init(&bp->lock); in macb_probe()
4931 macb_configure_caps(bp, macb_config); in macb_probe()
4934 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { in macb_probe()
4936 bp->hw_dma_cap |= HW_DMA_CAP_64B; in macb_probe()
4949 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_probe()
4950 dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN; in macb_probe()
4954 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { in macb_probe()
4955 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); in macb_probe()
4957 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4958 macb_dma_desc_get_size(bp); in macb_probe()
4960 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); in macb_probe()
4962 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4963 macb_dma_desc_get_size(bp); in macb_probe()
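When MACB_CAPS_BD_RD_PREFETCH is set, DCFG10 reports how many descriptors the DMA engine reads ahead, encoded as a power of two; the probe path pads each ring allocation by that many descriptors' worth of bytes (the rx_bd_rd_prefetch/tx_bd_rd_prefetch terms added to RX_RING_BYTES/TX_RING_BYTES at the top of this listing), so read-ahead past the final descriptor stays inside the buffer. The decode, assuming a 16-byte descriptor:

    #include <stdio.h>

    int main(void)
    {
        unsigned int desc_size = 16;  /* assumed descriptor size */
        unsigned int val;

        /* (2 << (val - 1)) descriptors of padding, as in macb_probe() */
        for (val = 1; val <= 4; val++)
            printf("val=%u -> %u prefetch bytes\n",
                   val, (2u << (val - 1)) * desc_size);
        return 0;
    }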
4966 bp->rx_intr_mask = MACB_RX_INT_FLAGS; in macb_probe()
4967 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) in macb_probe()
4968 bp->rx_intr_mask |= MACB_BIT(RXUBR); in macb_probe()
4970 err = of_get_ethdev_address(np, bp->dev); in macb_probe()
4974 macb_get_hwaddr(bp); in macb_probe()
4979 bp->phy_interface = PHY_INTERFACE_MODE_MII; in macb_probe()
4981 bp->phy_interface = interface; in macb_probe()
4988 err = macb_mii_init(bp); in macb_probe()
5000 tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task); in macb_probe()
5003 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), in macb_probe()
5006 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_probe()
5007 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_probe()
5012 mdiobus_unregister(bp->mii_bus); in macb_probe()
5013 mdiobus_free(bp->mii_bus); in macb_probe()
5016 phy_exit(bp->sgmii_phy); in macb_probe()
5033 struct macb *bp; in macb_remove() local
5038 bp = netdev_priv(dev); in macb_remove()
5039 phy_exit(bp->sgmii_phy); in macb_remove()
5040 mdiobus_unregister(bp->mii_bus); in macb_remove()
5041 mdiobus_free(bp->mii_bus); in macb_remove()
5044 tasklet_kill(&bp->hresp_err_tasklet); in macb_remove()
5048 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, in macb_remove()
5049 bp->rx_clk, bp->tsu_clk); in macb_remove()
5052 phylink_destroy(bp->phylink); in macb_remove()
5062 struct macb *bp = netdev_priv(netdev); in macb_suspend() local
5071 if (bp->wol & MACB_WOL_ENABLED) { in macb_suspend()
5072 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
5074 macb_writel(bp, TSR, -1); in macb_suspend()
5075 macb_writel(bp, RSR, -1); in macb_suspend()
5076 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
5081 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_suspend()
5087 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_suspend()
5088 if (macb_is_gem(bp)) { in macb_suspend()
5089 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, in macb_suspend()
5090 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
5094 bp->queues[0].irq, err); in macb_suspend()
5095 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5098 queue_writel(bp->queues, IER, GEM_BIT(WOL)); in macb_suspend()
5099 gem_writel(bp, WOL, MACB_BIT(MAG)); in macb_suspend()
5101 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, in macb_suspend()
5102 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
5106 bp->queues[0].irq, err); in macb_suspend()
5107 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5110 queue_writel(bp->queues, IER, MACB_BIT(WOL)); in macb_suspend()
5111 macb_writel(bp, WOL, MACB_BIT(MAG)); in macb_suspend()
5113 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5115 enable_irq_wake(bp->queues[0].irq); in macb_suspend()
5119 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
5125 if (!(bp->wol & MACB_WOL_ENABLED)) { in macb_suspend()
5127 phylink_stop(bp->phylink); in macb_suspend()
5128 phy_exit(bp->sgmii_phy); in macb_suspend()
5130 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
5131 macb_reset_hw(bp); in macb_suspend()
5132 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5135 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_suspend()
5136 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); in macb_suspend()
5139 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); in macb_suspend()
5141 if (bp->ptp_info) in macb_suspend()
5142 bp->ptp_info->ptp_remove(netdev); in macb_suspend()
5152 struct macb *bp = netdev_priv(netdev); in macb_resume() local
5164 if (bp->wol & MACB_WOL_ENABLED) { in macb_resume()
5165 spin_lock_irqsave(&bp->lock, flags); in macb_resume()
5167 if (macb_is_gem(bp)) { in macb_resume()
5168 queue_writel(bp->queues, IDR, GEM_BIT(WOL)); in macb_resume()
5169 gem_writel(bp, WOL, 0); in macb_resume()
5171 queue_writel(bp->queues, IDR, MACB_BIT(WOL)); in macb_resume()
5172 macb_writel(bp, WOL, 0); in macb_resume()
5175 queue_readl(bp->queues, ISR); in macb_resume()
5176 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_resume()
5177 queue_writel(bp->queues, ISR, -1); in macb_resume()
5179 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_resume()
5180 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, in macb_resume()
5181 IRQF_SHARED, netdev->name, bp->queues); in macb_resume()
5185 bp->queues[0].irq, err); in macb_resume()
5186 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
5189 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
5191 disable_irq_wake(bp->queues[0].irq); in macb_resume()
5197 phylink_stop(bp->phylink); in macb_resume()
5201 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_resume()
5208 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); in macb_resume()
5210 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_resume()
5211 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); in macb_resume()
5213 macb_writel(bp, NCR, MACB_BIT(MPE)); in macb_resume()
5214 macb_init_hw(bp); in macb_resume()
5216 macb_restore_features(bp); in macb_resume()
5218 if (!device_may_wakeup(&bp->dev->dev)) in macb_resume()
5219 phy_init(bp->sgmii_phy); in macb_resume()
5221 phylink_start(bp->phylink); in macb_resume()
5225 if (bp->ptp_info) in macb_resume()
5226 bp->ptp_info->ptp_init(netdev); in macb_resume()
5234 struct macb *bp = netdev_priv(netdev); in macb_runtime_suspend() local
5237 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk); in macb_runtime_suspend()
5238 else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) in macb_runtime_suspend()
5239 macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk); in macb_runtime_suspend()
5247 struct macb *bp = netdev_priv(netdev); in macb_runtime_resume() local
5250 clk_prepare_enable(bp->pclk); in macb_runtime_resume()
5251 clk_prepare_enable(bp->hclk); in macb_runtime_resume()
5252 clk_prepare_enable(bp->tx_clk); in macb_runtime_resume()
5253 clk_prepare_enable(bp->rx_clk); in macb_runtime_resume()
5254 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()
5255 } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) { in macb_runtime_resume()
5256 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()