Lines matching refs:adap

187 static int cfg_queues(struct adapter *adap);
237 struct adapter *adap = pi->adapter; in dcb_tx_queue_prio_enable() local
238 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable()
258 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, in dcb_tx_queue_prio_enable()
263 dev_err(adap->pdev_dev, in dcb_tx_queue_prio_enable()
305 void t4_os_portmod_changed(struct adapter *adap, int port_id) in t4_os_portmod_changed() argument
311 struct net_device *dev = adap->port[port_id]; in t4_os_portmod_changed()
350 struct adapter *adap = pi->adapter; in cxgb4_set_addr_hash() local
356 list_for_each_entry(entry, &adap->mac_hlist, list) { in cxgb4_set_addr_hash()
360 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast, in cxgb4_set_addr_hash()
367 struct adapter *adap = pi->adapter; in cxgb4_mac_sync() local
382 ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist, in cxgb4_mac_sync()
395 list_add_tail(&new_entry->list, &adap->mac_hlist); in cxgb4_mac_sync()
405 struct adapter *adap = pi->adapter; in cxgb4_mac_unsync() local
413 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { in cxgb4_mac_unsync()
421 ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false); in cxgb4_mac_unsync()
534 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) in dcb_rpl() argument
537 struct net_device *dev = adap->port[adap->chan_map[port]]; in dcb_rpl()
541 cxgb4_dcb_handle_fw_update(adap, pcmd); in dcb_rpl()
570 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
581 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
587 t4_sge_eth_txq_egress_update(q->adap, eq, -1); in fwevtq_handler()
611 dev = q->adap->port[q->adap->chan_map[port]]; in fwevtq_handler()
625 dcb_rpl(q->adap, pcmd); in fwevtq_handler()
629 t4_handle_fw_rpl(q->adap, p->data); in fwevtq_handler()
633 do_l2t_write_rpl(q->adap, p); in fwevtq_handler()
637 do_smt_write_rpl(q->adap, p); in fwevtq_handler()
641 filter_rpl(q->adap, p); in fwevtq_handler()
645 hash_filter_rpl(q->adap, p); in fwevtq_handler()
649 hash_del_filter_rpl(q->adap, p); in fwevtq_handler()
653 do_srq_table_rpl(q->adap, p); in fwevtq_handler()
655 dev_err(q->adap->pdev_dev, in fwevtq_handler()
677 struct adapter *adap = cookie; in t4_nondata_intr() local
678 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A)); in t4_nondata_intr()
681 adap->swintr = 1; in t4_nondata_intr()
682 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v); in t4_nondata_intr()
684 if (adap->flags & CXGB4_MASTER_PF) in t4_nondata_intr()
685 t4_slow_intr_handler(adap); in t4_nondata_intr()
689 int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec, in cxgb4_set_msix_aff() argument
695 dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n"); in cxgb4_set_msix_aff()
699 cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)), in cxgb4_set_msix_aff()
704 dev_warn(adap->pdev_dev, in cxgb4_set_msix_aff()
717 static int request_msix_queue_irqs(struct adapter *adap) in request_msix_queue_irqs() argument
719 struct sge *s = &adap->sge; in request_msix_queue_irqs()
726 err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec, in request_msix_queue_irqs()
728 adap->msix_info[s->fwevtq_msix_idx].desc, in request_msix_queue_irqs()
742 cxgb4_set_msix_aff(adap, minfo->vec, in request_msix_queue_irqs()
753 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in request_msix_queue_irqs()
757 static void free_msix_queue_irqs(struct adapter *adap) in free_msix_queue_irqs() argument
759 struct sge *s = &adap->sge; in free_msix_queue_irqs()
763 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in free_msix_queue_irqs()
771 static int setup_ppod_edram(struct adapter *adap) in setup_ppod_edram() argument
785 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
787 dev_warn(adap->pdev_dev, in setup_ppod_edram()
796 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
798 dev_err(adap->pdev_dev, in setup_ppod_edram()
828 struct adapter *adap = pi->adapter; in cxgb4_config_rss() local
831 ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss, in cxgb4_config_rss()
841 return t4_config_vi_rss(adap, adap->mbox, viid, in cxgb4_config_rss()
886 static int setup_rss(struct adapter *adap) in setup_rss() argument
890 for_each_port(adap, i) { in setup_rss()
891 const struct port_info *pi = adap2pinfo(adap, i); in setup_rss()
922 static void quiesce_rx(struct adapter *adap) in quiesce_rx() argument
926 for (i = 0; i < adap->sge.ingr_sz; i++) { in quiesce_rx()
927 struct sge_rspq *q = adap->sge.ingr_map[i]; in quiesce_rx()
937 static void disable_interrupts(struct adapter *adap) in disable_interrupts() argument
939 struct sge *s = &adap->sge; in disable_interrupts()
941 if (adap->flags & CXGB4_FULL_INIT_DONE) { in disable_interrupts()
942 t4_intr_disable(adap); in disable_interrupts()
943 if (adap->flags & CXGB4_USING_MSIX) { in disable_interrupts()
944 free_msix_queue_irqs(adap); in disable_interrupts()
945 free_irq(adap->msix_info[s->nd_msix_idx].vec, in disable_interrupts()
946 adap); in disable_interrupts()
948 free_irq(adap->pdev->irq, adap); in disable_interrupts()
950 quiesce_rx(adap); in disable_interrupts()
954 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q) in cxgb4_enable_rx() argument
960 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), in cxgb4_enable_rx()
968 static void enable_rx(struct adapter *adap) in enable_rx() argument
972 for (i = 0; i < adap->sge.ingr_sz; i++) { in enable_rx()
973 struct sge_rspq *q = adap->sge.ingr_map[i]; in enable_rx()
978 cxgb4_enable_rx(adap, q); in enable_rx()
982 static int setup_non_data_intr(struct adapter *adap) in setup_non_data_intr() argument
986 adap->sge.nd_msix_idx = -1; in setup_non_data_intr()
987 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_non_data_intr()
991 msix = cxgb4_get_msix_idx_from_bmap(adap); in setup_non_data_intr()
995 snprintf(adap->msix_info[msix].desc, in setup_non_data_intr()
996 sizeof(adap->msix_info[msix].desc), in setup_non_data_intr()
997 "%s", adap->port[0]->name); in setup_non_data_intr()
999 adap->sge.nd_msix_idx = msix; in setup_non_data_intr()
1003 static int setup_fw_sge_queues(struct adapter *adap) in setup_fw_sge_queues() argument
1005 struct sge *s = &adap->sge; in setup_fw_sge_queues()
1011 if (adap->flags & CXGB4_USING_MSIX) { in setup_fw_sge_queues()
1013 msix = cxgb4_get_msix_idx_from_bmap(adap); in setup_fw_sge_queues()
1017 snprintf(adap->msix_info[msix].desc, in setup_fw_sge_queues()
1018 sizeof(adap->msix_info[msix].desc), in setup_fw_sge_queues()
1019 "%s-FWeventq", adap->port[0]->name); in setup_fw_sge_queues()
1021 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, in setup_fw_sge_queues()
1028 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], in setup_fw_sge_queues()
1031 cxgb4_free_msix_idx_in_bmap(adap, msix); in setup_fw_sge_queues()
1045 static int setup_sge_queues(struct adapter *adap) in setup_sge_queues() argument
1048 struct sge *s = &adap->sge; in setup_sge_queues()
1052 if (is_uld(adap)) in setup_sge_queues()
1055 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_sge_queues()
1058 for_each_port(adap, i) { in setup_sge_queues()
1059 struct net_device *dev = adap->port[i]; in setup_sge_queues()
1066 msix = cxgb4_get_msix_idx_from_bmap(adap); in setup_sge_queues()
1072 snprintf(adap->msix_info[msix].desc, in setup_sge_queues()
1073 sizeof(adap->msix_info[msix].desc), in setup_sge_queues()
1075 q->msix = &adap->msix_info[msix]; in setup_sge_queues()
1078 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, in setup_sge_queues()
1082 t4_get_tp_ch_map(adap, in setup_sge_queues()
1092 err = t4_sge_alloc_eth_txq(adap, t, dev, in setup_sge_queues()
1095 !!(adap->flags & CXGB4_SGE_DBQ_TIMER)); in setup_sge_queues()
1101 for_each_port(adap, i) { in setup_sge_queues()
1108 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], in setup_sge_queues()
1114 if (!is_t4(adap->params.chip)) { in setup_sge_queues()
1115 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], in setup_sge_queues()
1116 netdev_get_tx_queue(adap->port[0], 0) in setup_sge_queues()
1122 t4_write_reg(adap, is_t4(adap->params.chip) ? in setup_sge_queues()
1125 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | in setup_sge_queues()
1129 dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err); in setup_sge_queues()
1130 t4_free_sge_resources(adap); in setup_sge_queues()
1243 struct adapter *adap = q->adap; in cxgb4_set_rspq_intr_params() local
1252 new_idx = closest_thres(&adap->sge, cnt); in cxgb4_set_rspq_intr_params()
1259 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in cxgb4_set_rspq_intr_params()
1267 us = us == 0 ? 6 : closest_timer(&adap->sge, us); in cxgb4_set_rspq_intr_params()
1289 static int setup_debugfs(struct adapter *adap) in setup_debugfs() argument
1291 if (IS_ERR_OR_NULL(adap->debugfs_root)) in setup_debugfs()
1295 t4_setup_debugfs(adap); in setup_debugfs()
1300 static void cxgb4_port_mirror_free_rxq(struct adapter *adap, in cxgb4_port_mirror_free_rxq() argument
1303 if ((adap->flags & CXGB4_FULL_INIT_DONE) && in cxgb4_port_mirror_free_rxq()
1304 !(adap->flags & CXGB4_SHUTTING_DOWN)) in cxgb4_port_mirror_free_rxq()
1307 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_free_rxq()
1311 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_free_rxq()
1314 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_free_rxq()
1320 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_alloc_queues() local
1322 struct sge *s = &adap->sge; in cxgb4_port_mirror_alloc_queues()
1339 if (!(adap->flags & CXGB4_USING_MSIX)) in cxgb4_port_mirror_alloc_queues()
1340 msix = -((int)adap->sge.intrq.abs_id + 1); in cxgb4_port_mirror_alloc_queues()
1347 msix = cxgb4_get_msix_idx_from_bmap(adap); in cxgb4_port_mirror_alloc_queues()
1353 mirror_rxq->msix = &adap->msix_info[msix]; in cxgb4_port_mirror_alloc_queues()
1359 init_rspq(adap, &mirror_rxq->rspq, in cxgb4_port_mirror_alloc_queues()
1367 ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false, in cxgb4_port_mirror_alloc_queues()
1374 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_alloc_queues()
1382 cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec, in cxgb4_port_mirror_alloc_queues()
1387 cxgb4_enable_rx(adap, &mirror_rxq->rspq); in cxgb4_port_mirror_alloc_queues()
1409 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_alloc_queues()
1412 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_alloc_queues()
1416 cxgb4_port_mirror_free_rxq(adap, in cxgb4_port_mirror_alloc_queues()
1427 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_free_queues() local
1428 struct sge *s = &adap->sge; in cxgb4_port_mirror_free_queues()
1438 cxgb4_port_mirror_free_rxq(adap, in cxgb4_port_mirror_free_queues()
1448 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_start() local
1459 ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror, in cxgb4_port_mirror_start()
1464 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1477 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1490 ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true, in cxgb4_port_mirror_start()
1494 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1504 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_stop() local
1509 t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false, in cxgb4_port_mirror_stop()
1516 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_alloc() local
1528 ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0, in cxgb4_port_mirror_alloc()
1535 if (adap->flags & CXGB4_FULL_INIT_DONE) { in cxgb4_port_mirror_alloc()
1553 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_alloc()
1564 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_free() local
1579 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_free()
1746 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_queue_tid_release() local
1749 spin_lock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1750 *p = adap->tid_release_head; in cxgb4_queue_tid_release()
1752 adap->tid_release_head = (void **)((uintptr_t)p | chan); in cxgb4_queue_tid_release()
1753 if (!adap->tid_release_task_busy) { in cxgb4_queue_tid_release()
1754 adap->tid_release_task_busy = true; in cxgb4_queue_tid_release()
1755 queue_work(adap->workq, &adap->tid_release_task); in cxgb4_queue_tid_release()
1757 spin_unlock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1766 struct adapter *adap; in process_tid_release_list() local
1768 adap = container_of(work, struct adapter, tid_release_task); in process_tid_release_list()
1770 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1771 while (adap->tid_release_head) { in process_tid_release_list()
1772 void **p = adap->tid_release_head; in process_tid_release_list()
1776 adap->tid_release_head = *p; in process_tid_release_list()
1778 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1784 mk_tid_release(skb, chan, p - adap->tids.tid_tab); in process_tid_release_list()
1785 t4_ofld_send(adap, skb); in process_tid_release_list()
1786 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1788 adap->tid_release_task_busy = false; in process_tid_release_list()
1789 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1799 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_remove_tid() local
1802 WARN_ON(tid_out_of_range(&adap->tids, tid)); in cxgb4_remove_tid()
1804 if (t->tid_tab[tid - adap->tids.tid_base]) { in cxgb4_remove_tid()
1805 t->tid_tab[tid - adap->tids.tid_base] = NULL; in cxgb4_remove_tid()
1823 t4_ofld_send(adap, skb); in cxgb4_remove_tid()
1834 struct adapter *adap = container_of(t, struct adapter, tids); in tid_init() local
1893 if (is_offload(adap)) { in tid_init()
1897 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in tid_init()
1928 struct adapter *adap; in cxgb4_create_server() local
1936 adap = netdev2adap(dev); in cxgb4_create_server()
1944 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server()
1948 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server()
1969 struct adapter *adap; in cxgb4_create_server6() local
1977 adap = netdev2adap(dev); in cxgb4_create_server6()
1987 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server6()
1991 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server6()
2000 struct adapter *adap; in cxgb4_remove_server() local
2004 adap = netdev2adap(dev); in cxgb4_remove_server()
2015 ret = t4_mgmt_tx(adap, skb); in cxgb4_remove_server()
2140 struct adapter *adap = netdev2adap(dev); in cxgb4_dbfifo_count() local
2143 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in cxgb4_dbfifo_count()
2144 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in cxgb4_dbfifo_count()
2145 if (is_t4(adap->params.chip)) { in cxgb4_dbfifo_count()
2183 struct adapter *adap = pci_get_drvdata(pdev); in cxgb4_get_tcp_stats() local
2185 spin_lock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2186 t4_tp_get_tcp_stats(adap, v4, v6, false); in cxgb4_get_tcp_stats()
2187 spin_unlock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2194 struct adapter *adap = netdev2adap(dev); in cxgb4_iscsi_init() local
2196 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask); in cxgb4_iscsi_init()
2197 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) | in cxgb4_iscsi_init()
2205 struct adapter *adap = netdev2adap(dev); in cxgb4_flush_eq_cache() local
2207 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS); in cxgb4_flush_eq_cache()
2211 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) in read_eq_indices() argument
2213 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8; in read_eq_indices()
2217 spin_lock(&adap->win0_lock); in read_eq_indices()
2218 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr, in read_eq_indices()
2221 spin_unlock(&adap->win0_lock); in read_eq_indices()
2232 struct adapter *adap = netdev2adap(dev); in cxgb4_sync_txq_pidx() local
2236 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); in cxgb4_sync_txq_pidx()
2249 if (is_t4(adap->params.chip)) in cxgb4_sync_txq_pidx()
2254 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in cxgb4_sync_txq_pidx()
2267 struct adapter *adap; in cxgb4_read_tpte() local
2271 adap = netdev2adap(dev); in cxgb4_read_tpte()
2273 offset = ((stag >> 8) * 32) + adap->vres.stag.start; in cxgb4_read_tpte()
2281 size = t4_read_reg(adap, MA_EDRAM0_BAR_A); in cxgb4_read_tpte()
2283 size = t4_read_reg(adap, MA_EDRAM1_BAR_A); in cxgb4_read_tpte()
2285 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); in cxgb4_read_tpte()
2288 if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) { in cxgb4_read_tpte()
2289 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); in cxgb4_read_tpte()
2309 } else if (is_t5(adap->params.chip)) { in cxgb4_read_tpte()
2310 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); in cxgb4_read_tpte()
2326 spin_lock(&adap->win0_lock); in cxgb4_read_tpte()
2327 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ); in cxgb4_read_tpte()
2328 spin_unlock(&adap->win0_lock); in cxgb4_read_tpte()
2332 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", in cxgb4_read_tpte()
2341 struct adapter *adap; in cxgb4_read_sge_timestamp() local
2343 adap = netdev2adap(dev); in cxgb4_read_sge_timestamp()
2344 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A); in cxgb4_read_sge_timestamp()
2345 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A)); in cxgb4_read_sge_timestamp()
2402 static void drain_db_fifo(struct adapter *adap, int usecs) in drain_db_fifo() argument
2407 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in drain_db_fifo()
2408 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in drain_db_fifo()
2409 if (is_t4(adap->params.chip)) { in drain_db_fifo()
2433 static void enable_txq_db(struct adapter *adap, struct sge_txq *q) in enable_txq_db() argument
2441 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in enable_txq_db()
2449 static void disable_dbs(struct adapter *adap) in disable_dbs() argument
2453 for_each_ethrxq(&adap->sge, i) in disable_dbs()
2454 disable_txq_db(&adap->sge.ethtxq[i].q); in disable_dbs()
2455 if (is_offload(adap)) { in disable_dbs()
2457 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in disable_dbs()
2460 for_each_ofldtxq(&adap->sge, i) { in disable_dbs()
2467 for_each_port(adap, i) in disable_dbs()
2468 disable_txq_db(&adap->sge.ctrlq[i].q); in disable_dbs()
2471 static void enable_dbs(struct adapter *adap) in enable_dbs() argument
2475 for_each_ethrxq(&adap->sge, i) in enable_dbs()
2476 enable_txq_db(adap, &adap->sge.ethtxq[i].q); in enable_dbs()
2477 if (is_offload(adap)) { in enable_dbs()
2479 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in enable_dbs()
2482 for_each_ofldtxq(&adap->sge, i) { in enable_dbs()
2485 enable_txq_db(adap, &txq->q); in enable_dbs()
2489 for_each_port(adap, i) in enable_dbs()
2490 enable_txq_db(adap, &adap->sge.ctrlq[i].q); in enable_dbs()
2493 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) in notify_rdma_uld() argument
2497 if (adap->uld && adap->uld[type].handle) in notify_rdma_uld()
2498 adap->uld[type].control(adap->uld[type].handle, cmd); in notify_rdma_uld()
2503 struct adapter *adap; in process_db_full() local
2505 adap = container_of(work, struct adapter, db_full_task); in process_db_full()
2507 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_full()
2508 enable_dbs(adap); in process_db_full()
2509 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_full()
2510 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_full()
2511 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
2515 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
2519 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) in sync_txq_pidx() argument
2525 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); in sync_txq_pidx()
2537 if (is_t4(adap->params.chip)) in sync_txq_pidx()
2542 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in sync_txq_pidx()
2550 CH_WARN(adap, "DB drop recovery failed.\n"); in sync_txq_pidx()
2553 static void recover_all_queues(struct adapter *adap) in recover_all_queues() argument
2557 for_each_ethrxq(&adap->sge, i) in recover_all_queues()
2558 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); in recover_all_queues()
2559 if (is_offload(adap)) { in recover_all_queues()
2561 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in recover_all_queues()
2563 for_each_ofldtxq(&adap->sge, i) { in recover_all_queues()
2566 sync_txq_pidx(adap, &txq->q); in recover_all_queues()
2570 for_each_port(adap, i) in recover_all_queues()
2571 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); in recover_all_queues()
2576 struct adapter *adap; in process_db_drop() local
2578 adap = container_of(work, struct adapter, db_drop_task); in process_db_drop()
2580 if (is_t4(adap->params.chip)) { in process_db_drop()
2581 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2582 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); in process_db_drop()
2583 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2584 recover_all_queues(adap); in process_db_drop()
2585 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2586 enable_dbs(adap); in process_db_drop()
2587 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_drop()
2588 } else if (is_t5(adap->params.chip)) { in process_db_drop()
2589 u32 dropped_db = t4_read_reg(adap, 0x010ac); in process_db_drop()
2596 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, in process_db_drop()
2599 dev_err(adap->pdev_dev, "doorbell drop recovery: " in process_db_drop()
2603 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); in process_db_drop()
2606 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); in process_db_drop()
2609 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_drop()
2610 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); in process_db_drop()
2613 void t4_db_full(struct adapter *adap) in t4_db_full() argument
2615 if (is_t4(adap->params.chip)) { in t4_db_full()
2616 disable_dbs(adap); in t4_db_full()
2617 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_full()
2618 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in t4_db_full()
2620 queue_work(adap->workq, &adap->db_full_task); in t4_db_full()
2624 void t4_db_dropped(struct adapter *adap) in t4_db_dropped() argument
2626 if (is_t4(adap->params.chip)) { in t4_db_dropped()
2627 disable_dbs(adap); in t4_db_dropped()
2628 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_dropped()
2630 queue_work(adap->workq, &adap->db_drop_task); in t4_db_dropped()
2641 static void detach_ulds(struct adapter *adap) in detach_ulds() argument
2645 if (!is_uld(adap)) in detach_ulds()
2649 list_del(&adap->list_node); in detach_ulds()
2652 if (adap->uld && adap->uld[i].handle) in detach_ulds()
2653 adap->uld[i].state_change(adap->uld[i].handle, in detach_ulds()
2663 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) in notify_ulds() argument
2669 if (adap->uld && adap->uld[i].handle) in notify_ulds()
2670 adap->uld[i].state_change(adap->uld[i].handle, in notify_ulds()
2683 struct adapter *adap; in cxgb4_inet6addr_handler() local
2689 list_for_each_entry(adap, &adapter_list, list_node) { in cxgb4_inet6addr_handler()
2692 cxgb4_clip_get(adap->port[0], in cxgb4_inet6addr_handler()
2696 cxgb4_clip_release(adap->port[0], in cxgb4_inet6addr_handler()
2730 static void update_clip(const struct adapter *adap) in update_clip() argument
2739 dev = adap->port[i]; in update_clip()
2762 static int cxgb_up(struct adapter *adap) in cxgb_up() argument
2764 struct sge *s = &adap->sge; in cxgb_up()
2768 err = setup_sge_queues(adap); in cxgb_up()
2771 err = setup_rss(adap); in cxgb_up()
2775 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_up()
2781 err = request_irq(adap->msix_info[s->nd_msix_idx].vec, in cxgb_up()
2783 adap->msix_info[s->nd_msix_idx].desc, adap); in cxgb_up()
2787 err = request_msix_queue_irqs(adap); in cxgb_up()
2791 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), in cxgb_up()
2792 (adap->flags & CXGB4_USING_MSI) ? 0 in cxgb_up()
2794 adap->port[0]->name, adap); in cxgb_up()
2799 enable_rx(adap); in cxgb_up()
2800 t4_sge_start(adap); in cxgb_up()
2801 t4_intr_enable(adap); in cxgb_up()
2802 adap->flags |= CXGB4_FULL_INIT_DONE; in cxgb_up()
2805 notify_ulds(adap, CXGB4_STATE_UP); in cxgb_up()
2807 update_clip(adap); in cxgb_up()
2812 free_irq(adap->msix_info[s->nd_msix_idx].vec, adap); in cxgb_up()
2814 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); in cxgb_up()
2816 t4_free_sge_resources(adap); in cxgb_up()
2920 struct adapter *adap; in cxgb4_create_server_filter() local
2924 adap = netdev2adap(dev); in cxgb4_create_server_filter()
2927 stid -= adap->tids.sftid_base; in cxgb4_create_server_filter()
2928 stid += adap->tids.nftids; in cxgb4_create_server_filter()
2932 f = &adap->tids.ftid_tab[stid]; in cxgb4_create_server_filter()
2941 clear_filter(adap, f); in cxgb4_create_server_filter()
2953 if (adap->params.tp.vlan_pri_map & PORT_F) { in cxgb4_create_server_filter()
2959 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { in cxgb4_create_server_filter()
2973 f->tid = stid + adap->tids.ftid_base; in cxgb4_create_server_filter()
2974 ret = set_filter_wr(adap, stid); in cxgb4_create_server_filter()
2976 clear_filter(adap, f); in cxgb4_create_server_filter()
2988 struct adapter *adap; in cxgb4_remove_server_filter() local
2990 adap = netdev2adap(dev); in cxgb4_remove_server_filter()
2993 stid -= adap->tids.sftid_base; in cxgb4_remove_server_filter()
2994 stid += adap->tids.nftids; in cxgb4_remove_server_filter()
2996 f = &adap->tids.ftid_tab[stid]; in cxgb4_remove_server_filter()
3000 return delete_filter(adap, stid); in cxgb4_remove_server_filter()
3198 static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap) in cxgb4_mgmt_fill_vf_station_mac_addr() argument
3206 err = t4_get_raw_vpd_params(adap, &adap->params.vpd); in cxgb4_mgmt_fill_vf_station_mac_addr()
3210 na = adap->params.vpd.na; in cxgb4_mgmt_fill_vf_station_mac_addr()
3226 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev); in cxgb4_mgmt_fill_vf_station_mac_addr()
3228 macaddr[5] = adap->pf * nvfs + vf; in cxgb4_mgmt_fill_vf_station_mac_addr()
3229 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr); in cxgb4_mgmt_fill_vf_station_mac_addr()
3236 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_mac() local
3249 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac); in cxgb4_mgmt_set_vf_mac()
3251 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); in cxgb4_mgmt_set_vf_mac()
3259 struct adapter *adap = pi->adapter; in cxgb4_mgmt_get_vf_config() local
3262 if (vf >= adap->num_vfs) in cxgb4_mgmt_get_vf_config()
3264 vfinfo = &adap->vfinfo[vf]; in cxgb4_mgmt_get_vf_config()
3291 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_rate() local
3298 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_rate()
3302 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3314 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_rate()
3317 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3319 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3322 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3324 adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3325 adap->vfinfo[vf].tx_rate = 0; in cxgb4_mgmt_set_vf_rate()
3331 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3337 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); in cxgb4_mgmt_set_vf_rate()
3342 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3354 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET, in cxgb4_mgmt_set_vf_rate()
3362 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", in cxgb4_mgmt_set_vf_rate()
3366 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3374 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, in cxgb4_mgmt_set_vf_rate()
3377 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3379 ret, adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3382 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", in cxgb4_mgmt_set_vf_rate()
3383 adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3384 adap->vfinfo[vf].tx_rate = max_tx_rate; in cxgb4_mgmt_set_vf_rate()
3392 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_vlan() local
3395 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7) in cxgb4_mgmt_set_vf_vlan()
3401 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan); in cxgb4_mgmt_set_vf_vlan()
3403 adap->vfinfo[vf].vlan = vlan; in cxgb4_mgmt_set_vf_vlan()
3407 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n", in cxgb4_mgmt_set_vf_vlan()
3408 ret, (vlan ? "setting" : "clearing"), adap->pf, vf); in cxgb4_mgmt_set_vf_vlan()
3416 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_link_state() local
3420 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_link_state()
3442 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_link_state()
3445 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_link_state()
3447 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_link_state()
3451 adap->vfinfo[vf].link_state = link; in cxgb4_mgmt_set_vf_link_state()
3478 struct adapter *adap = pi->adapter; in cxgb_netpoll() local
3480 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_netpoll()
3482 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; in cxgb_netpoll()
3487 t4_intr_handler(adap)(0, adap); in cxgb_netpoll()
3494 struct adapter *adap = pi->adapter; in cxgb_set_tx_maxrate() local
3507 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_set_tx_maxrate()
3508 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3517 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3528 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3541 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3575 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3613 struct adapter *adap = netdev2adap(dev); in cxgb_setup_tc_matchall() local
3615 if (!adap->tc_matchall) in cxgb_setup_tc_matchall()
3639 struct adapter *adap = netdev2adap(dev); in cxgb_setup_tc_block_ingress_cb() local
3641 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_ingress_cb()
3642 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_ingress_cb()
3668 struct adapter *adap = netdev2adap(dev); in cxgb_setup_tc_block_egress_cb() local
3670 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_egress_cb()
3671 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_egress_cb()
3693 struct adapter *adap = netdev2adap(dev); in cxgb_setup_tc_mqprio() local
3695 if (!is_ethofld(adap) || !adap->tc_mqprio) in cxgb_setup_tc_mqprio()
3917 struct adapter *adap; in notify_fatal_err() local
3919 adap = container_of(work, struct adapter, fatal_err_notify_task); in notify_fatal_err()
3920 notify_ulds(adap, CXGB4_STATE_FATAL_ERROR); in notify_fatal_err()
3923 void t4_fatal_err(struct adapter *adap) in t4_fatal_err() argument
3927 if (pci_channel_offline(adap->pdev)) in t4_fatal_err()
3933 t4_shutdown_adapter(adap); in t4_fatal_err()
3934 for_each_port(adap, port) { in t4_fatal_err()
3935 struct net_device *dev = adap->port[port]; in t4_fatal_err()
3946 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); in t4_fatal_err()
3947 queue_work(adap->workq, &adap->fatal_err_notify_task); in t4_fatal_err()
3950 static void setup_memwin(struct adapter *adap) in setup_memwin() argument
3952 u32 nic_win_base = t4_get_util_window(adap); in setup_memwin()
3954 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC); in setup_memwin()
3957 static void setup_memwin_rdma(struct adapter *adap) in setup_memwin_rdma() argument
3959 if (adap->vres.ocq.size) { in setup_memwin_rdma()
3963 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2); in setup_memwin_rdma()
3965 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); in setup_memwin_rdma()
3966 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; in setup_memwin_rdma()
3967 t4_write_reg(adap, in setup_memwin_rdma()
3970 t4_write_reg(adap, in setup_memwin_rdma()
3972 adap->vres.ocq.start); in setup_memwin_rdma()
3973 t4_read_reg(adap, in setup_memwin_rdma()
4178 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) in adap_init1() argument
4186 ret = t4_get_pfres(adap); in adap_init1()
4188 dev_err(adap->pdev_dev, in adap_init1()
4198 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); in adap_init1()
4204 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); in adap_init1()
4208 ret = t4_config_glbl_rss(adap, adap->pf, in adap_init1()
4215 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, in adap_init1()
4221 t4_sge_init(adap); in adap_init1()
4224 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849); in adap_init1()
4225 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); in adap_init1()
4226 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A); in adap_init1()
4227 v = t4_read_reg(adap, TP_PIO_DATA_A); in adap_init1()
4228 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F); in adap_init1()
4231 adap->params.tp.tx_modq_map = 0xE4; in adap_init1()
4232 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A, in adap_init1()
4233 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); in adap_init1()
4237 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
4239 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
4241 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
4245 if (is_offload(adap)) { in adap_init1()
4246 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A, in adap_init1()
4251 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A, in adap_init1()
4259 return t4_early_init(adap, adap->pf); in adap_init1()
4383 static int adap_init0_phy(struct adapter *adap) in adap_init0_phy() argument
4391 phy_info = find_phy_info(adap->pdev->device); in adap_init0_phy()
4393 dev_warn(adap->pdev_dev, in adap_init0_phy()
4404 adap->pdev_dev); in adap_init0_phy()
4412 dev_err(adap->pdev_dev, "unable to find PHY Firmware image " in adap_init0_phy()
4418 t4_phy_fw_ver(adap, &cur_phy_fw_ver); in adap_init0_phy()
4419 dev_warn(adap->pdev_dev, "continuing with, on-adapter " in adap_init0_phy()
4429 ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version, in adap_init0_phy()
4432 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", in adap_init0_phy()
4440 dev_info(adap->pdev_dev, "Successfully transferred PHY " in adap_init0_phy()
4761 static int adap_init0(struct adapter *adap, int vpd_skip) in adap_init0() argument
4773 ret = t4_init_devlog_params(adap); in adap_init0()
4778 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, in adap_init0()
4781 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", in adap_init0()
4785 if (ret == adap->mbox) in adap_init0()
4786 adap->flags |= CXGB4_MASTER_PF; in adap_init0()
4796 t4_get_version_info(adap); in adap_init0()
4797 ret = t4_check_fw_version(adap); in adap_init0()
4801 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) { in adap_init0()
4811 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4813 dev_err(adap->pdev_dev, in adap_init0()
4815 CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4830 adap->pdev_dev); in adap_init0()
4832 dev_err(adap->pdev_dev, in adap_init0()
4841 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, in adap_init0()
4856 ret = adap_config_hma(adap); in adap_init0()
4858 dev_err(adap->pdev_dev, in adap_init0()
4861 dev_info(adap->pdev_dev, "Coming up as %s: "\ in adap_init0()
4863 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE"); in adap_init0()
4865 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ in adap_init0()
4873 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in adap_init0()
4880 dev_err(adap->pdev_dev, "firmware doesn't support " in adap_init0()
4889 ret = adap_init0_config(adap, reset); in adap_init0()
4891 dev_err(adap->pdev_dev, "no Configuration File " in adap_init0()
4896 dev_err(adap->pdev_dev, "could not initialize " in adap_init0()
4906 ret = t4_get_pfres(adap); in adap_init0()
4908 dev_err(adap->pdev_dev, in adap_init0()
4924 ret = t4_get_vpd_params(adap, &adap->params.vpd); in adap_init0()
4936 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); in adap_init0()
4940 adap->params.nports = hweight32(port_vec); in adap_init0()
4941 adap->params.portvec = port_vec; in adap_init0()
4947 ret = t4_sge_init(adap); in adap_init0()
4956 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4960 adap->sge.dbqtimer_tick = val[0]; in adap_init0()
4961 ret = t4_read_sge_dbqtimers(adap, in adap_init0()
4962 ARRAY_SIZE(adap->sge.dbqtimer_val), in adap_init0()
4963 adap->sge.dbqtimer_val); in adap_init0()
4967 adap->flags |= CXGB4_SGE_DBQ_TIMER; in adap_init0()
4969 if (is_bypass_device(adap->pdev->device)) in adap_init0()
4970 adap->params.bypass = 1; in adap_init0()
4981 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); in adap_init0()
4984 adap->sge.egr_start = val[0]; in adap_init0()
4985 adap->l2t_start = val[1]; in adap_init0()
4986 adap->l2t_end = val[2]; in adap_init0()
4987 adap->tids.ftid_base = val[3]; in adap_init0()
4988 adap->tids.nftids = val[4] - val[3] + 1; in adap_init0()
4989 adap->sge.ingr_start = val[5]; in adap_init0()
4991 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { in adap_init0()
4994 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
4999 adap->tids.hpftid_base = val[0]; in adap_init0()
5000 adap->tids.nhpftids = val[1] - val[0] + 1; in adap_init0()
5007 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5010 adap->rawf_start = val[0]; in adap_init0()
5011 adap->rawf_cnt = val[1] - val[0] + 1; in adap_init0()
5014 adap->tids.tid_base = in adap_init0()
5015 t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A); in adap_init0()
5026 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5029 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; in adap_init0()
5030 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; in adap_init0()
5032 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, in adap_init0()
5033 sizeof(*adap->sge.egr_map), GFP_KERNEL); in adap_init0()
5034 if (!adap->sge.egr_map) { in adap_init0()
5039 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, in adap_init0()
5040 sizeof(*adap->sge.ingr_map), GFP_KERNEL); in adap_init0()
5041 if (!adap->sge.ingr_map) { in adap_init0()
5049 adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5050 if (!adap->sge.starving_fl) { in adap_init0()
5055 adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5056 if (!adap->sge.txq_maperr) { in adap_init0()
5062 adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5063 if (!adap->sge.blocked_fl) { in adap_init0()
5071 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5074 adap->clipt_start = val[0]; in adap_init0()
5075 adap->clipt_end = val[1]; in adap_init0()
5079 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5085 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; in adap_init0()
5087 adap->params.nsched_cls = val[0]; in adap_init0()
5093 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5098 adap->flags |= CXGB4_FW_OFLD_CONN; in adap_init0()
5099 adap->tids.aftid_base = val[0]; in adap_init0()
5100 adap->tids.aftid_end = val[1]; in adap_init0()
5110 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5118 if (is_t4(adap->params.chip)) { in adap_init0()
5119 adap->params.ulptx_memwrite_dsgl = false; in adap_init0()
5122 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5124 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); in adap_init0()
5129 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5131 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5134 if (is_t4(adap->params.chip)) { in adap_init0()
5135 adap->params.filter2_wr_support = false; in adap_init0()
5138 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5140 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5148 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5150 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0); in adap_init0()
5160 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0()
5170 adap->params.offload = 1; in adap_init0()
5182 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5186 adap->tids.ntids = val[0]; in adap_init0()
5187 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); in adap_init0()
5188 adap->tids.stid_base = val[1]; in adap_init0()
5189 adap->tids.nstids = val[2] - val[1] + 1; in adap_init0()
5199 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) { in adap_init0()
5200 adap->tids.sftid_base = adap->tids.ftid_base + in adap_init0()
5201 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5202 adap->tids.nsftids = adap->tids.nftids - in adap_init0()
5203 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5204 adap->tids.nftids = adap->tids.sftid_base - in adap_init0()
5205 adap->tids.ftid_base; in adap_init0()
5207 adap->vres.ddp.start = val[3]; in adap_init0()
5208 adap->vres.ddp.size = val[4] - val[3] + 1; in adap_init0()
5209 adap->params.ofldq_wr_cred = val[5]; in adap_init0()
5212 init_hash_filter(adap); in adap_init0()
5214 adap->num_ofld_uld += 1; in adap_init0()
5220 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5223 adap->tids.eotid_base = val[0]; in adap_init0()
5224 adap->tids.neotids = min_t(u32, MAX_ATIDS, in adap_init0()
5226 adap->params.ethofld = 1; in adap_init0()
5237 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5241 adap->vres.stag.start = val[0]; in adap_init0()
5242 adap->vres.stag.size = val[1] - val[0] + 1; in adap_init0()
5243 adap->vres.rq.start = val[2]; in adap_init0()
5244 adap->vres.rq.size = val[3] - val[2] + 1; in adap_init0()
5245 adap->vres.pbl.start = val[4]; in adap_init0()
5246 adap->vres.pbl.size = val[5] - val[4] + 1; in adap_init0()
5250 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5253 adap->vres.srq.start = val[0]; in adap_init0()
5254 adap->vres.srq.size = val[1] - val[0] + 1; in adap_init0()
5256 if (adap->vres.srq.size) { in adap_init0()
5257 adap->srq = t4_init_srq(adap->vres.srq.size); in adap_init0()
5258 if (!adap->srq) in adap_init0()
5259 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n"); in adap_init0()
5268 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, in adap_init0()
5272 adap->vres.qp.start = val[0]; in adap_init0()
5273 adap->vres.qp.size = val[1] - val[0] + 1; in adap_init0()
5274 adap->vres.cq.start = val[2]; in adap_init0()
5275 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0()
5276 adap->vres.ocq.start = val[4]; in adap_init0()
5277 adap->vres.ocq.size = val[5] - val[4] + 1; in adap_init0()
5281 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, in adap_init0()
5284 adap->params.max_ordird_qp = 8; in adap_init0()
5285 adap->params.max_ird_adapter = 32 * adap->tids.ntids; in adap_init0()
5288 adap->params.max_ordird_qp = val[0]; in adap_init0()
5289 adap->params.max_ird_adapter = val[1]; in adap_init0()
5291 dev_info(adap->pdev_dev, in adap_init0()
5293 adap->params.max_ordird_qp, in adap_init0()
5294 adap->params.max_ird_adapter); in adap_init0()
5298 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5300 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0); in adap_init0()
5304 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5306 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0); in adap_init0()
5307 adap->num_ofld_uld += 2; in adap_init0()
5312 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5316 adap->vres.iscsi.start = val[0]; in adap_init0()
5317 adap->vres.iscsi.size = val[1] - val[0] + 1; in adap_init0()
5318 if (is_t6(adap->params.chip)) { in adap_init0()
5321 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5324 adap->vres.ppod_edram.start = val[0]; in adap_init0()
5325 adap->vres.ppod_edram.size = in adap_init0()
5328 dev_info(adap->pdev_dev, in adap_init0()
5331 adap->vres.ppod_edram.size); in adap_init0()
5335 adap->num_ofld_uld += 2; in adap_init0()
5341 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5347 adap->vres.ncrypto_fc = val[0]; in adap_init0()
5349 adap->num_ofld_uld += 1; in adap_init0()
5355 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5359 adap->vres.key.start = val[0]; in adap_init0()
5360 adap->vres.key.size = val[1] - val[0] + 1; in adap_init0()
5361 adap->num_uld += 1; in adap_init0()
5363 adap->params.crypto = ntohs(caps_cmd.cryptocaps); in adap_init0()
5371 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); in adap_init0()
5393 if (adap->params.mtus[i] == 1492) { in adap_init0()
5394 adap->params.mtus[i] = 1488; in adap_init0()
5398 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in adap_init0()
5399 adap->params.b_wnd); in adap_init0()
5401 t4_init_sge_params(adap); in adap_init0()
5402 adap->flags |= CXGB4_FW_OK; in adap_init0()
5403 t4_init_tp_params(adap, true); in adap_init0()
5412 adap_free_hma_mem(adap); in adap_init0()
5413 kfree(adap->sge.egr_map); in adap_init0()
5414 kfree(adap->sge.ingr_map); in adap_init0()
5415 bitmap_free(adap->sge.starving_fl); in adap_init0()
5416 bitmap_free(adap->sge.txq_maperr); in adap_init0()
5418 bitmap_free(adap->sge.blocked_fl); in adap_init0()
5421 t4_fw_bye(adap, adap->mbox); in adap_init0()
5431 struct adapter *adap = pci_get_drvdata(pdev); in eeh_err_detected() local
5433 if (!adap) in eeh_err_detected()
5437 adap->flags &= ~CXGB4_FW_OK; in eeh_err_detected()
5438 notify_ulds(adap, CXGB4_STATE_START_RECOVERY); in eeh_err_detected()
5439 spin_lock(&adap->stats_lock); in eeh_err_detected()
5440 for_each_port(adap, i) { in eeh_err_detected()
5441 struct net_device *dev = adap->port[i]; in eeh_err_detected()
5447 spin_unlock(&adap->stats_lock); in eeh_err_detected()
5448 disable_interrupts(adap); in eeh_err_detected()
5449 if (adap->flags & CXGB4_FULL_INIT_DONE) in eeh_err_detected()
5450 cxgb_down(adap); in eeh_err_detected()
5452 if ((adap->flags & CXGB4_DEV_ENABLED)) { in eeh_err_detected()
5454 adap->flags &= ~CXGB4_DEV_ENABLED; in eeh_err_detected()
5464 struct adapter *adap = pci_get_drvdata(pdev); in eeh_slot_reset() local
5466 if (!adap) { in eeh_slot_reset()
5472 if (!(adap->flags & CXGB4_DEV_ENABLED)) { in eeh_slot_reset()
5478 adap->flags |= CXGB4_DEV_ENABLED; in eeh_slot_reset()
5485 if (t4_wait_dev_ready(adap->regs) < 0) in eeh_slot_reset()
5487 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) in eeh_slot_reset()
5489 adap->flags |= CXGB4_FW_OK; in eeh_slot_reset()
5490 if (adap_init1(adap, &c)) in eeh_slot_reset()
5493 for_each_port(adap, i) { in eeh_slot_reset()
5494 struct port_info *pi = adap2pinfo(adap, i); in eeh_slot_reset()
5497 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1, in eeh_slot_reset()
5506 if (adap->params.viid_smt_extn_support) { in eeh_slot_reset()
5516 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in eeh_slot_reset()
5517 adap->params.b_wnd); in eeh_slot_reset()
5518 setup_memwin(adap); in eeh_slot_reset()
5519 if (cxgb_up(adap)) in eeh_slot_reset()
5527 struct adapter *adap = pci_get_drvdata(pdev); in eeh_resume() local
5529 if (!adap) in eeh_resume()
5533 for_each_port(adap, i) { in eeh_resume()
5534 struct net_device *dev = adap->port[i]; in eeh_resume()
5653 static int cfg_queues(struct adapter *adap) in cfg_queues() argument
5658 struct sge *s = &adap->sge; in cfg_queues()
5663 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { in cfg_queues()
5664 adap->params.offload = 0; in cfg_queues()
5665 adap->params.crypto = 0; in cfg_queues()
5666 adap->params.ethofld = 0; in cfg_queues()
5681 niqflint = adap->params.pfres.niqflint - 1; in cfg_queues()
5682 if (!(adap->flags & CXGB4_USING_MSIX)) in cfg_queues()
5684 neq = adap->params.pfres.neq / 2; in cfg_queues()
5687 if (avail_qsets < adap->params.nports) { in cfg_queues()
5688 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", in cfg_queues()
5689 avail_qsets, adap->params.nports); in cfg_queues()
5694 for_each_port(adap, i) in cfg_queues()
5695 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); in cfg_queues()
5703 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; in cfg_queues()
5711 if (adap->params.nports * 8 > avail_eth_qsets) { in cfg_queues()
5712 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", in cfg_queues()
5713 avail_eth_qsets, adap->params.nports * 8); in cfg_queues()
5717 if (adap->params.nports * ncpus < avail_eth_qsets) in cfg_queues()
5723 (avail_eth_qsets - (adap->params.nports - n10g) * q1g)) in cfg_queues()
5735 for_each_port(adap, i) { in cfg_queues()
5736 struct port_info *pi = adap2pinfo(adap, i); in cfg_queues()
5747 if (is_uld(adap)) { in cfg_queues()
5752 num_ulds = adap->num_uld + adap->num_ofld_uld; in cfg_queues()
5754 avail_uld_qsets = roundup(i, adap->params.nports); in cfg_queues()
5755 if (avail_qsets < num_ulds * adap->params.nports) { in cfg_queues()
5756 adap->params.offload = 0; in cfg_queues()
5757 adap->params.crypto = 0; in cfg_queues()
5760 s->ofldqsets = adap->params.nports; in cfg_queues()
5771 if (is_ethofld(adap)) { in cfg_queues()
5773 adap->params.ethofld = 0; in cfg_queues()
5788 else if (avail_qsets >= adap->params.nports) in cfg_queues()
5789 s->mirrorqsets = adap->params.nports; in cfg_queues()
5797 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); in cfg_queues()
5807 if (!is_t4(adap->params.chip)) in cfg_queues()
5810 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); in cfg_queues()
5811 init_rspq(adap, &s->intrq, 0, 1, 512, 64); in cfg_queues()
5820 static void reduce_ethqs(struct adapter *adap, int n) in reduce_ethqs() argument
5825 while (n < adap->sge.ethqsets) in reduce_ethqs()
5826 for_each_port(adap, i) { in reduce_ethqs()
5827 pi = adap2pinfo(adap, i); in reduce_ethqs()
5830 adap->sge.ethqsets--; in reduce_ethqs()
5831 if (adap->sge.ethqsets <= n) in reduce_ethqs()
5837 for_each_port(adap, i) { in reduce_ethqs()
5838 pi = adap2pinfo(adap, i); in reduce_ethqs()
5844 static int alloc_msix_info(struct adapter *adap, u32 num_vec) in alloc_msix_info() argument
5852 adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL); in alloc_msix_info()
5853 if (!adap->msix_bmap.msix_bmap) { in alloc_msix_info()
5858 spin_lock_init(&adap->msix_bmap.lock); in alloc_msix_info()
5859 adap->msix_bmap.mapsize = num_vec; in alloc_msix_info()
5861 adap->msix_info = msix_info; in alloc_msix_info()
5865 static void free_msix_info(struct adapter *adap) in free_msix_info() argument
5867 bitmap_free(adap->msix_bmap.msix_bmap); in free_msix_info()
5868 kfree(adap->msix_info); in free_msix_info()
5871 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap) in cxgb4_get_msix_idx_from_bmap() argument
5873 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_get_msix_idx_from_bmap()
5890 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, in cxgb4_free_msix_idx_in_bmap() argument
5893 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_free_msix_idx_in_bmap()
5904 static int enable_msix(struct adapter *adap) in enable_msix() argument
5908 u8 num_uld = 0, nchan = adap->params.nports; in enable_msix()
5910 struct sge *s = &adap->sge; in enable_msix()
5925 if (is_uld(adap)) { in enable_msix()
5926 num_uld = adap->num_ofld_uld + adap->num_uld; in enable_msix()
5932 if (is_ethofld(adap)) { in enable_msix()
5954 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); in enable_msix()
5961 allocated = pci_enable_msix_range(adap->pdev, entries, in enable_msix()
5964 dev_info(adap->pdev_dev, in enable_msix()
5970 dev_info(adap->pdev_dev, in enable_msix()
5972 adap->params.offload = 0; in enable_msix()
5973 adap->params.crypto = 0; in enable_msix()
5974 adap->params.ethofld = 0; in enable_msix()
5990 if (is_uld(adap)) in enable_msix()
5992 if (is_ethofld(adap)) in enable_msix()
6003 for_each_port(adap, i) { in enable_msix()
6004 pi = adap2pinfo(adap, i); in enable_msix()
6017 if (is_uld(adap)) { in enable_msix()
6040 if (is_uld(adap)) in enable_msix()
6042 if (is_ethofld(adap)) in enable_msix()
6050 reduce_ethqs(adap, ethqsets); in enable_msix()
6053 if (is_uld(adap)) { in enable_msix()
6058 if (is_ethofld(adap)) in enable_msix()
6063 for_each_port(adap, i) { in enable_msix()
6064 pi = adap2pinfo(adap, i); in enable_msix()
6071 ret = alloc_msix_info(adap, allocated); in enable_msix()
6076 adap->msix_info[i].vec = entries[i].vector; in enable_msix()
6077 adap->msix_info[i].idx = i; in enable_msix()
6080 dev_info(adap->pdev_dev, in enable_msix()
6089 pci_disable_msix(adap->pdev); in enable_msix()
6098 static int init_rss(struct adapter *adap) in init_rss() argument
6103 err = t4_init_rss_mode(adap, adap->mbox); in init_rss()
6107 for_each_port(adap, i) { in init_rss()
6108 struct port_info *pi = adap2pinfo(adap, i); in init_rss()
6136 const struct adapter *adap = pi->adapter; in print_port_info() local
6160 netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf); in print_port_info()
6213 static int t4_get_chip_type(struct adapter *adap, int ver) in t4_get_chip_type() argument
6215 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A)); in t4_get_chip_type()
6248 struct adapter *adap = pci_get_drvdata(pdev); in cxgb4_iov_configure() local
6253 pcie_fw = readl(adap->regs + PCIE_FW_A); in cxgb4_iov_configure()
6283 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6284 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6285 adap->port[0] = NULL; in cxgb4_iov_configure()
6288 adap->num_vfs = 0; in cxgb4_iov_configure()
6289 kfree(adap->vfinfo); in cxgb4_iov_configure()
6290 adap->vfinfo = NULL; in cxgb4_iov_configure()
6328 FW_PFVF_CMD_PFN_V(adap->pf) | in cxgb4_iov_configure()
6331 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), in cxgb4_iov_configure()
6338 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx, in cxgb4_iov_configure()
6339 adap->pf); in cxgb4_iov_configure()
6346 pi->adapter = adap; in cxgb4_iov_configure()
6351 adap->port[0] = netdev; in cxgb4_iov_configure()
6354 err = register_netdev(adap->port[0]); in cxgb4_iov_configure()
6357 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6358 adap->port[0] = NULL; in cxgb4_iov_configure()
6362 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev), in cxgb4_iov_configure()
6364 if (!adap->vfinfo) { in cxgb4_iov_configure()
6365 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6366 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6367 adap->port[0] = NULL; in cxgb4_iov_configure()
6370 cxgb4_mgmt_fill_vf_station_mac_addr(adap); in cxgb4_iov_configure()
6377 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6378 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6379 adap->port[0] = NULL; in cxgb4_iov_configure()
6380 kfree(adap->vfinfo); in cxgb4_iov_configure()
6381 adap->vfinfo = NULL; in cxgb4_iov_configure()
6386 adap->num_vfs = num_vfs; in cxgb4_iov_configure()
6393 static int chcr_offload_state(struct adapter *adap, in chcr_offload_state() argument
6399 if (!adap->uld[CXGB4_ULD_KTLS].handle) { in chcr_offload_state()
6400 dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n"); in chcr_offload_state()
6403 if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) { in chcr_offload_state()
6404 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6412 if (!adap->uld[CXGB4_ULD_IPSEC].handle) { in chcr_offload_state()
6413 dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n"); in chcr_offload_state()
6416 if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) { in chcr_offload_state()
6417 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6424 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6441 struct adapter *adap = netdev2adap(netdev); in cxgb4_ktls_dev_add() local
6445 ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS); in cxgb4_ktls_dev_add()
6449 ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE); in cxgb4_ktls_dev_add()
6453 ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk, in cxgb4_ktls_dev_add()
6459 cxgb4_set_ktls_feature(adap, in cxgb4_ktls_dev_add()
6470 struct adapter *adap = netdev2adap(netdev); in cxgb4_ktls_dev_del() local
6473 if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS)) in cxgb4_ktls_dev_del()
6476 adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx, in cxgb4_ktls_dev_del()
6480 cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE); in cxgb4_ktls_dev_del()
6495 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_add_state() local
6502 ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS); in cxgb4_xfrm_add_state()
6506 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack); in cxgb4_xfrm_add_state()
6516 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_del_state() local
6519 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_del_state()
6523 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) in cxgb4_xfrm_del_state()
6526 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x); in cxgb4_xfrm_del_state()
6534 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_free_state() local
6537 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_free_state()
6541 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) in cxgb4_xfrm_free_state()
6544 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x); in cxgb4_xfrm_free_state()
6552 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_ipsec_offload_ok() local
6556 dev_dbg(adap->pdev_dev, in cxgb4_ipsec_offload_ok()
6560 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) in cxgb4_ipsec_offload_ok()
6563 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x); in cxgb4_ipsec_offload_ok()
6572 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_advance_esn_state() local
6575 dev_dbg(adap->pdev_dev, in cxgb4_advance_esn_state()
6579 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) in cxgb4_advance_esn_state()
6582 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x); in cxgb4_advance_esn_state()
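
Note on the declaration lines above: the driver binds its struct adapter pointer in a few recurring ways — from a port_info (adap = pi->adapter), from a net_device via netdev2adap(), from PCI driver data via pci_get_drvdata(), and, in deferred work handlers such as process_tid_release_list() and process_db_full(), from the embedded work_struct via container_of(). The sketch below is a minimal, self-contained illustration of that last pattern only; the struct layouts and field names are simplified stand-ins for illustration, not the driver's real definitions.

/* Minimal sketch (assumed simplified layout, not the cxgb4 driver's):
 * a work handler recovers the adapter that embeds its work item, the
 * same container_of() idiom seen in process_tid_release_list() above. */
#include <stddef.h>
#include <stdio.h>

struct work_struct { int pending; };            /* simplified stand-in */

struct adapter {                                /* simplified stand-in */
        int id;
        struct work_struct tid_release_task;    /* embedded work item */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void process_tid_release_list(struct work_struct *work)
{
        /* Map the embedded work item back to its enclosing adapter. */
        struct adapter *adap =
                container_of(work, struct adapter, tid_release_task);

        printf("adapter %d\n", adap->id);
}

int main(void)
{
        struct adapter a = { .id = 7 };

        /* The workqueue core would normally pass this pointer. */
        process_tid_release_list(&a.tid_release_task);
        return 0;
}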