Lines Matching refs:sge
233 struct sge *sge; member
247 struct sge { struct
275 static void tx_sched_stop(struct sge *sge) in tx_sched_stop() argument
277 struct sched *s = sge->tx_sched; in tx_sched_stop()
290 unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, in t1_sched_update_parms() argument
293 struct sched *s = sge->tx_sched; in t1_sched_update_parms()
313 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) { in t1_sched_update_parms()
336 void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
338 struct sched *s = sge->tx_sched;
343 t1_sched_update_parms(sge, i, 0, 0);
350 void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
353 struct sched *s = sge->tx_sched;
356 t1_sched_update_parms(sge, port, 0, 0);
364 static int tx_sched_init(struct sge *sge) in tx_sched_init() argument
375 s->sge = sge; in tx_sched_init()
376 sge->tx_sched = s; in tx_sched_init()
380 t1_sched_update_parms(sge, i, 1500, 1000); in tx_sched_init()
391 static inline int sched_update_avail(struct sge *sge) in sched_update_avail() argument
393 struct sched *s = sge->tx_sched; in sched_update_avail()
425 static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, in sched_skb() argument
428 struct sched *s = sge->tx_sched; in sched_skb()
466 if (update-- && sched_update_avail(sge)) in sched_skb()
474 struct cmdQ *q = &sge->cmdQ[0]; in sched_skb()
478 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); in sched_skb()
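The scheduler hits above (t1_sched_update_parms(), sched_update_avail(), sched_skb()) revolve around per-port byte accounting: each port earns transmit allowance at its configured drain rate, and a queued packet is only released once its length fits within that allowance. The stand-alone sketch below models that accounting in user space; the type, field names, and constants are illustrative assumptions, not the driver's definitions.

/*
 * Minimal user-space model of the per-port byte accounting suggested by
 * t1_sched_update_parms()/sched_update_avail()/sched_skb().  Names and
 * constants are illustrative, not the driver's.
 */
#include <stdio.h>

struct port_sched {
	unsigned int drain_bytes_per_us;	/* refill rate (assumed unit) */
	unsigned int avail;			/* bytes the port may send now */
	unsigned int max_avail;			/* cap that bounds bursts */
};

/* Credit the port for 'us' microseconds of elapsed time, capped at max_avail. */
static void credit_elapsed(struct port_sched *p, unsigned int us)
{
	p->avail += p->drain_bytes_per_us * us;
	if (p->avail > p->max_avail)
		p->avail = p->max_avail;
}

/* Release a packet only once the port has earned enough bytes for it. */
static int try_send(struct port_sched *p, unsigned int len)
{
	if (p->avail < len)
		return 0;
	p->avail -= len;
	return 1;
}

int main(void)
{
	struct port_sched p = {
		.drain_bytes_per_us = 125,	/* roughly 1 Gb/s */
		.avail = 0,
		.max_avail = 9000,
	};

	credit_elapsed(&p, 20);			/* 20 us elapse */
	printf("1500-byte frame sent? %d\n", try_send(&p, 1500));
	printf("9000-byte frame sent? %d\n", try_send(&p, 9000));
	return 0;
}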
518 static void free_rx_resources(struct sge *sge) in free_rx_resources() argument
520 struct pci_dev *pdev = sge->adapter->pdev; in free_rx_resources()
523 if (sge->respQ.entries) { in free_rx_resources()
524 size = sizeof(struct respQ_e) * sge->respQ.size; in free_rx_resources()
525 dma_free_coherent(&pdev->dev, size, sge->respQ.entries, in free_rx_resources()
526 sge->respQ.dma_addr); in free_rx_resources()
530 struct freelQ *q = &sge->freelQ[i]; in free_rx_resources()
548 static int alloc_rx_resources(struct sge *sge, struct sge_params *p) in alloc_rx_resources() argument
550 struct pci_dev *pdev = sge->adapter->pdev; in alloc_rx_resources()
554 struct freelQ *q = &sge->freelQ[i]; in alloc_rx_resources()
558 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; in alloc_rx_resources()
578 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + in alloc_rx_resources()
580 sge->freelQ[!sge->jumbo_fl].dma_offset; in alloc_rx_resources()
584 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; in alloc_rx_resources()
590 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; in alloc_rx_resources()
591 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; in alloc_rx_resources()
593 sge->respQ.genbit = 1; in alloc_rx_resources()
594 sge->respQ.size = SGE_RESPQ_E_N; in alloc_rx_resources()
595 sge->respQ.credits = 0; in alloc_rx_resources()
596 size = sizeof(struct respQ_e) * sge->respQ.size; in alloc_rx_resources()
597 sge->respQ.entries = in alloc_rx_resources()
598 dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr, in alloc_rx_resources()
600 if (!sge->respQ.entries) in alloc_rx_resources()
605 free_rx_resources(sge); in alloc_rx_resources()
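alloc_rx_resources() and free_rx_resources() above pair dma_alloc_coherent() with dma_free_coherent() for the response queue, keeping the size and bus address around so the free mirrors the allocation exactly. The kernel-style sketch below shows only that pairing; struct my_ring, my_ring_alloc() and my_ring_free() are hypothetical names, and only the two dma_*_coherent() calls are the real kernel API.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct my_ring {
	void *entries;		/* CPU view of the ring */
	dma_addr_t dma_addr;	/* bus address programmed into the hardware */
	size_t bytes;		/* remembered so the free uses the same size */
};

static int my_ring_alloc(struct device *dev, struct my_ring *r,
			 size_t entry_size, unsigned int nentries)
{
	r->bytes = entry_size * nentries;
	r->entries = dma_alloc_coherent(dev, r->bytes, &r->dma_addr,
					GFP_KERNEL);
	return r->entries ? 0 : -ENOMEM;
}

static void my_ring_free(struct device *dev, struct my_ring *r)
{
	if (r->entries) {
		dma_free_coherent(dev, r->bytes, r->entries, r->dma_addr);
		r->entries = NULL;
	}
}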
612 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) in free_cmdQ_buffers() argument
615 struct pci_dev *pdev = sge->adapter->pdev; in free_cmdQ_buffers()
647 static void free_tx_resources(struct sge *sge) in free_tx_resources() argument
649 struct pci_dev *pdev = sge->adapter->pdev; in free_tx_resources()
653 struct cmdQ *q = &sge->cmdQ[i]; in free_tx_resources()
657 free_cmdQ_buffers(sge, q, q->in_use); in free_tx_resources()
671 static int alloc_tx_resources(struct sge *sge, struct sge_params *p) in alloc_tx_resources() argument
673 struct pci_dev *pdev = sge->adapter->pdev; in alloc_tx_resources()
677 struct cmdQ *q = &sge->cmdQ[i]; in alloc_tx_resources()
706 sge->cmdQ[0].stop_thres = sge->adapter->params.nports * in alloc_tx_resources()
711 free_tx_resources(sge); in alloc_tx_resources()
729 struct sge *sge = adapter->sge; in t1_vlan_mode() local
732 sge->sge_control |= F_VLAN_XTRACT; in t1_vlan_mode()
734 sge->sge_control &= ~F_VLAN_XTRACT; in t1_vlan_mode()
736 writel(sge->sge_control, adapter->regs + A_SG_CONTROL); in t1_vlan_mode()
745 static void configure_sge(struct sge *sge, struct sge_params *p) in configure_sge() argument
747 struct adapter *ap = sge->adapter; in configure_sge()
750 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, in configure_sge()
752 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size, in configure_sge()
754 setup_ring_params(ap, sge->freelQ[0].dma_addr, in configure_sge()
755 sge->freelQ[0].size, A_SG_FL0BASELWR, in configure_sge()
757 setup_ring_params(ap, sge->freelQ[1].dma_addr, in configure_sge()
758 sge->freelQ[1].size, A_SG_FL1BASELWR, in configure_sge()
764 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size, in configure_sge()
766 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT); in configure_sge()
768 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | in configure_sge()
771 V_RX_PKT_OFFSET(sge->rx_pkt_pad); in configure_sge()
774 sge->sge_control |= F_ENABLE_BIG_ENDIAN; in configure_sge()
778 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap); in configure_sge()
780 t1_sge_set_coalesce_params(sge, p); in configure_sge()
786 static inline unsigned int jumbo_payload_capacity(const struct sge *sge) in jumbo_payload_capacity() argument
788 return sge->freelQ[sge->jumbo_fl].rx_buffer_size - in jumbo_payload_capacity()
789 sge->freelQ[sge->jumbo_fl].dma_offset - in jumbo_payload_capacity()
796 void t1_sge_destroy(struct sge *sge) in t1_sge_destroy() argument
800 for_each_port(sge->adapter, i) in t1_sge_destroy()
801 free_percpu(sge->port_stats[i]); in t1_sge_destroy()
803 kfree(sge->tx_sched); in t1_sge_destroy()
804 free_tx_resources(sge); in t1_sge_destroy()
805 free_rx_resources(sge); in t1_sge_destroy()
806 kfree(sge); in t1_sge_destroy()
821 static void refill_free_list(struct sge *sge, struct freelQ *q) in refill_free_list() argument
823 struct pci_dev *pdev = sge->adapter->pdev; in refill_free_list()
839 skb_reserve(skb, sge->rx_pkt_pad); in refill_free_list()
867 static void freelQs_empty(struct sge *sge) in freelQs_empty() argument
869 struct adapter *adapter = sge->adapter; in freelQs_empty()
873 refill_free_list(sge, &sge->freelQ[0]); in freelQs_empty()
874 refill_free_list(sge, &sge->freelQ[1]); in freelQs_empty()
876 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && in freelQs_empty()
877 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { in freelQs_empty()
879 irqholdoff_reg = sge->fixed_intrtimer; in freelQs_empty()
883 irqholdoff_reg = sge->intrtimer_nres; in freelQs_empty()
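freelQs_empty() above picks an interrupt hold-off value based on how far the two free lists have been refilled: once both are above a quarter of their size, the normal fixed_intrtimer is restored, otherwise the slower intrtimer_nres stays in effect. A small stand-alone model of that selection, with illustrative field names:

#include <stdio.h>

struct freel { unsigned int credits, size; };

static unsigned int pick_holdoff(const struct freel *fl0,
				 const struct freel *fl1,
				 unsigned int fixed_intrtimer,
				 unsigned int intrtimer_nres)
{
	if (fl0->credits > (fl0->size >> 2) &&
	    fl1->credits > (fl1->size >> 2))
		return fixed_intrtimer;		/* free lists healthy again */
	return intrtimer_nres;			/* still starved, back off harder */
}

int main(void)
{
	struct freel starved = { .credits = 100,  .size = 4096 };
	struct freel healthy = { .credits = 2048, .size = 4096 };

	printf("holdoff = %u\n", pick_holdoff(&starved, &healthy, 50, 1000));
	printf("holdoff = %u\n", pick_holdoff(&healthy, &healthy, 50, 1000));
	return 0;
}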
900 void t1_sge_intr_disable(struct sge *sge) in t1_sge_intr_disable() argument
902 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_disable()
904 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_disable()
905 writel(0, sge->adapter->regs + A_SG_INT_ENABLE); in t1_sge_intr_disable()
911 void t1_sge_intr_enable(struct sge *sge) in t1_sge_intr_enable() argument
914 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_enable()
916 if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO) in t1_sge_intr_enable()
918 writel(en, sge->adapter->regs + A_SG_INT_ENABLE); in t1_sge_intr_enable()
919 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_enable()
925 void t1_sge_intr_clear(struct sge *sge) in t1_sge_intr_clear() argument
927 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); in t1_sge_intr_clear()
928 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); in t1_sge_intr_clear()
934 bool t1_sge_intr_error_handler(struct sge *sge) in t1_sge_intr_error_handler() argument
936 struct adapter *adapter = sge->adapter; in t1_sge_intr_error_handler()
943 sge->stats.respQ_empty++; in t1_sge_intr_error_handler()
945 sge->stats.respQ_overflow++; in t1_sge_intr_error_handler()
950 sge->stats.freelistQ_empty++; in t1_sge_intr_error_handler()
951 freelQs_empty(sge); in t1_sge_intr_error_handler()
954 sge->stats.pkt_too_big++; in t1_sge_intr_error_handler()
959 sge->stats.pkt_mismatch++; in t1_sge_intr_error_handler()
972 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) in t1_sge_get_intr_counts() argument
974 return &sge->stats; in t1_sge_get_intr_counts()
977 void t1_sge_get_port_stats(const struct sge *sge, int port, in t1_sge_get_port_stats() argument
984 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); in t1_sge_get_port_stats()
1290 static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) in reclaim_completed_tx() argument
1297 free_cmdQ_buffers(sge, q, reclaim); in reclaim_completed_tx()
1309 struct sge *sge = s->sge; in restart_sched() local
1310 struct adapter *adapter = sge->adapter; in restart_sched()
1311 struct cmdQ *q = &sge->cmdQ[0]; in restart_sched()
1316 reclaim_completed_tx(sge, q); in restart_sched()
1320 while ((skb = sched_skb(sge, NULL, credits)) != NULL) { in restart_sched()
1355 static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) in sge_rx() argument
1359 struct adapter *adapter = sge->adapter; in sge_rx()
1363 skb = get_packet(adapter, fl, len - sge->rx_pkt_pad); in sge_rx()
1365 sge->stats.rx_drops++; in sge_rx()
1376 st = this_cpu_ptr(sge->port_stats[p->iff]); in sge_rx()
1410 static void restart_tx_queues(struct sge *sge) in restart_tx_queues() argument
1412 struct adapter *adap = sge->adapter; in restart_tx_queues()
1415 if (!enough_free_Tx_descs(&sge->cmdQ[0])) in restart_tx_queues()
1421 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) && in restart_tx_queues()
1423 sge->stats.cmdQ_restarted[2]++; in restart_tx_queues()
1437 struct sge *sge = adapter->sge; in update_tx_info() local
1438 struct cmdQ *cmdq = &sge->cmdQ[0]; in update_tx_info()
1442 freelQs_empty(sge); in update_tx_info()
1453 if (sge->tx_sched) in update_tx_info()
1454 tasklet_hi_schedule(&sge->tx_sched->sched_tsk); in update_tx_info()
1459 if (unlikely(sge->stopped_tx_queues != 0)) in update_tx_info()
1460 restart_tx_queues(sge); in update_tx_info()
1471 struct sge *sge = adapter->sge; in process_responses() local
1472 struct respQ *q = &sge->respQ; in process_responses()
1494 sge->cmdQ[1].processed += cmdq_processed[1]; in process_responses()
1499 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; in process_responses()
1505 sge_rx(sge, fl, e->BufferLength); in process_responses()
1519 refill_free_list(sge, fl); in process_responses()
1521 sge->stats.pure_rsps++; in process_responses()
1538 sge->cmdQ[1].processed += cmdq_processed[1]; in process_responses()
1545 const struct respQ *Q = &adapter->sge->respQ; in responses_pending()
1561 struct sge *sge = adapter->sge; in process_pure_responses() local
1562 struct respQ *q = &sge->respQ; in process_pure_responses()
1564 const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; in process_pure_responses()
1590 sge->stats.pure_rsps++; in process_pure_responses()
1594 sge->cmdQ[1].processed += cmdq_processed[1]; in process_pure_responses()
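The response-queue walk in process_responses()/process_pure_responses() relies on the generation bit initialised by "sge->respQ.genbit = 1" above: an entry belongs to the consumer only while its generation bit matches the queue's, and the queue flips its bit each time cidx wraps. The stand-alone model below shows just that ownership rule; the entry layout and names are illustrative.

#include <stdio.h>

struct resp_entry { unsigned int gen; int data; };

struct resp_ring {
	struct resp_entry *e;
	unsigned int size, cidx, genbit;
};

/* Consume every entry the producer has published with the current genbit. */
static void drain(struct resp_ring *q)
{
	while (q->e[q->cidx].gen == q->genbit) {
		printf("consumed %d\n", q->e[q->cidx].data);
		if (++q->cidx == q->size) {	/* wrapped: expect flipped bit */
			q->cidx = 0;
			q->genbit ^= 1;
		}
	}
}

int main(void)
{
	struct resp_entry ring[4] = { { 1, 10 }, { 1, 11 }, { 0, 0 }, { 0, 0 } };
	struct resp_ring q = { .e = ring, .size = 4, .cidx = 0, .genbit = 1 };

	drain(&q);	/* consumes 10 and 11, then stops at the stale entry */
	return 0;
}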
1611 writel(adapter->sge->respQ.cidx, in t1_poll()
1637 t1_sge_stop(adapter->sge); in t1_interrupt_thread()
1655 struct sge *sge = adapter->sge; in t1_interrupt() local
1666 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); in t1_interrupt()
1679 sge->stats.unhandled_irqs++; in t1_interrupt()
1700 struct sge *sge = adapter->sge; in t1_sge_tx() local
1701 struct cmdQ *q = &sge->cmdQ[qid]; in t1_sge_tx()
1706 reclaim_completed_tx(sge, q); in t1_sge_tx()
1717 set_bit(dev->if_port, &sge->stopped_tx_queues); in t1_sge_tx()
1718 sge->stats.cmdQ_full[2]++; in t1_sge_tx()
1728 set_bit(dev->if_port, &sge->stopped_tx_queues); in t1_sge_tx()
1729 sge->stats.cmdQ_full[2]++; in t1_sge_tx()
1735 if (sge->tx_sched && !qid && skb->dev) { in t1_sge_tx()
1741 skb = sched_skb(sge, skb, credits); in t1_sge_tx()
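t1_sge_tx() above stops a port when the command queue runs out of descriptors, recording it in stopped_tx_queues, and restart_tx_queues() wakes it again once reclaim has freed enough descriptors. Below is a plain C model of that handshake; the kernel uses the atomic set_bit()/test_and_clear_bit() and netif_stop_queue()/netif_wake_queue(), and the restart threshold here is illustrative.

#include <stdio.h>

#define Q_SIZE		256
#define RESTART_THRES	(Q_SIZE / 4)	/* illustrative, not the driver's stop_thres */

static unsigned long stopped_tx_queues;

/* Transmit path: stop the port if the queue cannot hold this packet. */
static void tx_path(unsigned int port, unsigned int in_use, unsigned int need)
{
	if (Q_SIZE - in_use < need) {
		stopped_tx_queues |= 1UL << port;	/* netif_stop_queue() */
		printf("port %u stopped\n", port);
	}
}

/* Reclaim path: wake a stopped port once enough descriptors are free again. */
static void reclaim_path(unsigned int port, unsigned int in_use)
{
	if ((stopped_tx_queues & (1UL << port)) &&
	    Q_SIZE - in_use >= RESTART_THRES) {
		stopped_tx_queues &= ~(1UL << port);	/* netif_wake_queue() */
		printf("port %u restarted\n", port);
	}
}

int main(void)
{
	tx_path(0, 255, 4);	/* queue nearly full: port 0 stops     */
	reclaim_path(0, 10);	/* descriptors reclaimed: port 0 wakes */
	return 0;
}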
1811 struct sge *sge = adapter->sge; in t1_start_xmit() local
1812 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); in t1_start_xmit()
1877 if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { in t1_start_xmit()
1880 adapter->sge->espibug_skb[dev->if_port] = skb; in t1_start_xmit()
1925 struct sge *sge = from_timer(sge, t, tx_reclaim_timer); in sge_tx_reclaim_cb() local
1928 struct cmdQ *q = &sge->cmdQ[i]; in sge_tx_reclaim_cb()
1933 reclaim_completed_tx(sge, q); in sge_tx_reclaim_cb()
1935 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); in sge_tx_reclaim_cb()
1939 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); in sge_tx_reclaim_cb()
1945 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) in t1_sge_set_coalesce_params() argument
1947 sge->fixed_intrtimer = p->rx_coalesce_usecs * in t1_sge_set_coalesce_params()
1948 core_ticks_per_usec(sge->adapter); in t1_sge_set_coalesce_params()
1949 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); in t1_sge_set_coalesce_params()
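t1_sge_set_coalesce_params() above only converts the requested rx_coalesce_usecs into core clock ticks before writing A_SG_INTRTIMER. A worked example follows, assuming a 125 MHz core clock (125 ticks per microsecond); the clock rate is an assumption for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int rx_coalesce_usecs = 50;	/* requested hold-off */
	unsigned int core_ticks_per_usec = 125;	/* assumed 125 MHz core clock */
	unsigned int fixed_intrtimer = rx_coalesce_usecs * core_ticks_per_usec;

	printf("A_SG_INTRTIMER <- %u ticks\n", fixed_intrtimer);	/* 6250 */
	return 0;
}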
1957 int t1_sge_configure(struct sge *sge, struct sge_params *p) in t1_sge_configure() argument
1959 if (alloc_rx_resources(sge, p)) in t1_sge_configure()
1961 if (alloc_tx_resources(sge, p)) { in t1_sge_configure()
1962 free_rx_resources(sge); in t1_sge_configure()
1965 configure_sge(sge, p); in t1_sge_configure()
1973 p->large_buf_capacity = jumbo_payload_capacity(sge); in t1_sge_configure()
1980 void t1_sge_stop(struct sge *sge) in t1_sge_stop() argument
1983 writel(0, sge->adapter->regs + A_SG_CONTROL); in t1_sge_stop()
1984 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ in t1_sge_stop()
1986 if (is_T2(sge->adapter)) in t1_sge_stop()
1987 del_timer_sync(&sge->espibug_timer); in t1_sge_stop()
1989 del_timer_sync(&sge->tx_reclaim_timer); in t1_sge_stop()
1990 if (sge->tx_sched) in t1_sge_stop()
1991 tx_sched_stop(sge); in t1_sge_stop()
1994 kfree_skb(sge->espibug_skb[i]); in t1_sge_stop()
2000 void t1_sge_start(struct sge *sge) in t1_sge_start() argument
2002 refill_free_list(sge, &sge->freelQ[0]); in t1_sge_start()
2003 refill_free_list(sge, &sge->freelQ[1]); in t1_sge_start()
2005 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); in t1_sge_start()
2006 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); in t1_sge_start()
2007 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ in t1_sge_start()
2009 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); in t1_sge_start()
2011 if (is_T2(sge->adapter)) in t1_sge_start()
2012 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); in t1_sge_start()
2020 struct sge *sge = from_timer(sge, t, espibug_timer); in espibug_workaround_t204() local
2021 struct adapter *adapter = sge->adapter; in espibug_workaround_t204()
2032 struct sk_buff *skb = sge->espibug_skb[i]; in espibug_workaround_t204()
2058 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); in espibug_workaround_t204()
2063 struct sge *sge = from_timer(sge, t, espibug_timer); in espibug_workaround() local
2064 struct adapter *adapter = sge->adapter; in espibug_workaround()
2067 struct sk_buff *skb = sge->espibug_skb[0]; in espibug_workaround()
2090 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); in espibug_workaround()
2096 struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p) in t1_sge_create()
2098 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); in t1_sge_create() local
2101 if (!sge) in t1_sge_create()
2104 sge->adapter = adapter; in t1_sge_create()
2105 sge->netdev = adapter->port[0].dev; in t1_sge_create()
2106 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; in t1_sge_create()
2107 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; in t1_sge_create()
2110 sge->port_stats[i] = alloc_percpu(struct sge_port_stats); in t1_sge_create()
2111 if (!sge->port_stats[i]) in t1_sge_create()
2115 timer_setup(&sge->tx_reclaim_timer, sge_tx_reclaim_cb, 0); in t1_sge_create()
2117 if (is_T2(sge->adapter)) { in t1_sge_create()
2118 timer_setup(&sge->espibug_timer, in t1_sge_create()
2123 tx_sched_init(sge); in t1_sge_create()
2125 sge->espibug_timeout = 1; in t1_sge_create()
2128 sge->espibug_timeout = HZ/100; in t1_sge_create()
2134 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; in t1_sge_create()
2135 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; in t1_sge_create()
2136 if (sge->tx_sched) { in t1_sge_create()
2137 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) in t1_sge_create()
2147 return sge; in t1_sge_create()
2150 free_percpu(sge->port_stats[i]); in t1_sge_create()
2153 kfree(sge); in t1_sge_create()
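The tail of t1_sge_create() above allocates per-port percpu statistics in a loop and, if one allocation fails, walks back over the ports already set up before freeing the sge itself. The stand-alone model below reproduces that unwind shape with plain calloc()/free(); the types and the injected failure are illustrative.

#include <stdio.h>
#include <stdlib.h>

#define NPORTS 4

struct fake_sge { int *port_stats[NPORTS]; };

static struct fake_sge *fake_sge_create(int fail_at)
{
	struct fake_sge *sge = calloc(1, sizeof(*sge));
	int i;

	if (!sge)
		return NULL;

	for (i = 0; i < NPORTS; i++) {
		/* Simulate an allocation failure at port 'fail_at'. */
		sge->port_stats[i] = (i == fail_at) ? NULL : calloc(1, sizeof(int));
		if (!sge->port_stats[i])
			goto nomem_port;
	}
	return sge;

nomem_port:
	while (i >= 0)
		free(sge->port_stats[i--]);	/* free only what was allocated */
	free(sge);
	return NULL;
}

int main(void)
{
	struct fake_sge *ok  = fake_sge_create(-1);
	struct fake_sge *bad = fake_sge_create(2);

	printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
	return 0;
}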