Lines matching refs: adap — cross-reference hits for the identifier adap in the cxgb4 ULD code (cxgb4_uld.c, Chelsio cxgb4 driver)

59 struct adapter *adap = q->adap; in uldrx_flush_handler() local
61 if (adap->uld[q->uld].lro_flush) in uldrx_flush_handler()
62 adap->uld[q->uld].lro_flush(&q->lro_mgr); in uldrx_flush_handler()
77 struct adapter *adap = q->adap; in uldrx_handler() local
87 ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle, in uldrx_handler()
91 ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle, in uldrx_handler()
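The two handlers above show the dispatch pattern used throughout this file: each response queue records which ULD it belongs to (q->uld), that index selects the per-ULD entry in adap->uld[], and optional callbacks such as lro_flush are NULL-checked before the indirect call. A minimal standalone sketch of the idiom (all names hypothetical, not the driver's types):

#include <stdio.h>

struct pkt { const char *data; };

/* per-ULD callback table, standing in for adap->uld[] */
struct uld_ops {
	int  (*rx_handler)(void *handle, const struct pkt *p);
	void (*lro_flush)(void *handle);	/* optional, may be NULL */
	void *handle;
};

struct rxq {
	unsigned int uld;	/* index into the table, like q->uld */
};

static struct uld_ops uld_tbl[4];

/* NULL-check the optional callback, as uldrx_flush_handler() does */
static void rxq_flush(struct rxq *q)
{
	if (uld_tbl[q->uld].lro_flush)
		uld_tbl[q->uld].lro_flush(uld_tbl[q->uld].handle);
}

static int demo_rx(void *handle, const struct pkt *p)
{
	printf("rx(%s): %s\n", (char *)handle, p->data);
	return 0;
}

int main(void)
{
	static char name[] = "rdma";
	struct rxq q = { .uld = 1 };
	struct pkt p = { .data = "payload" };

	uld_tbl[1].rx_handler = demo_rx;
	uld_tbl[1].handle = name;

	uld_tbl[q.uld].rx_handler(uld_tbl[q.uld].handle, &p);
	rxq_flush(&q);	/* lro_flush is NULL here, safely skipped */
	return 0;
}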
108 static int alloc_uld_rxqs(struct adapter *adap, in alloc_uld_rxqs() argument
115 struct sge *s = &adap->sge; in alloc_uld_rxqs()
118 per_chan = rxq_info->nrxq / adap->params.nports; in alloc_uld_rxqs()
120 if (adap->flags & CXGB4_USING_MSIX) in alloc_uld_rxqs()
128 per_chan = rxq_info->nciq / adap->params.nports; in alloc_uld_rxqs()
133 msi_idx = cxgb4_get_msix_idx_from_bmap(adap); in alloc_uld_rxqs()
139 snprintf(adap->msix_info[msi_idx].desc, in alloc_uld_rxqs()
140 sizeof(adap->msix_info[msi_idx].desc), in alloc_uld_rxqs()
142 adap->port[0]->name, rxq_info->name, i); in alloc_uld_rxqs()
144 q->msix = &adap->msix_info[msi_idx]; in alloc_uld_rxqs()
146 err = t4_sge_alloc_rxq(adap, &q->rspq, false, in alloc_uld_rxqs()
147 adap->port[que_idx++ / per_chan], in alloc_uld_rxqs()
165 free_rspq_fl(adap, &q->rspq, in alloc_uld_rxqs()
168 cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx); in alloc_uld_rxqs()
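alloc_uld_rxqs() spreads a ULD's receive queues evenly across the adapter's ports: per_chan = nrxq / nports, and queue i binds to port (i / per_chan) via adap->port[que_idx++ / per_chan]; on failure it walks back, freeing each rspq and returning its MSI-X index to the bitmap. A compilable sketch of just the spreading arithmetic (toy values, no SGE calls):

#include <stdio.h>

int main(void)
{
	unsigned int nports = 2, nrxq = 8;
	unsigned int per_chan = nrxq / nports;	/* queues per port */
	unsigned int que_idx = 0, i;

	for (i = 0; i < nrxq; i++) {
		/* same mapping as adap->port[que_idx++ / per_chan] */
		unsigned int port = que_idx++ / per_chan;
		printf("rxq %u -> port %u\n", i, port);
	}
	return 0;
}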
174 setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) in setup_sge_queues_uld() argument
176 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in setup_sge_queues_uld()
179 ret = alloc_uld_rxqs(adap, rxq_info, lro); in setup_sge_queues_uld()
184 if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) { in setup_sge_queues_uld()
185 struct sge *s = &adap->sge; in setup_sge_queues_uld()
190 for_each_port(adap, i) { in setup_sge_queues_uld()
195 ret = t4_set_params(adap, adap->mbox, adap->pf, in setup_sge_queues_uld()
202 static void t4_free_uld_rxqs(struct adapter *adap, int n, in t4_free_uld_rxqs() argument
207 free_rspq_fl(adap, &q->rspq, in t4_free_uld_rxqs()
212 static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type) in free_sge_queues_uld() argument
214 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in free_sge_queues_uld()
216 if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) { in free_sge_queues_uld()
217 struct sge *s = &adap->sge; in free_sge_queues_uld()
222 for_each_port(adap, i) { in free_sge_queues_uld()
226 t4_set_params(adap, adap->mbox, adap->pf, in free_sge_queues_uld()
232 t4_free_uld_rxqs(adap, rxq_info->nciq, in free_sge_queues_uld()
234 t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq); in free_sge_queues_uld()
237 static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type, in cfg_queues_uld() argument
240 struct sge *s = &adap->sge; in cfg_queues_uld()
248 if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) { in cfg_queues_uld()
250 rxq_info->nrxq = roundup(i, adap->params.nports); in cfg_queues_uld()
254 rxq_info->nrxq = roundup(i, adap->params.nports); in cfg_queues_uld()
259 if (adap->flags & CXGB4_USING_MSIX) in cfg_queues_uld()
265 rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) * in cfg_queues_uld()
266 adap->params.nports); in cfg_queues_uld()
268 adap->params.nports); in cfg_queues_uld()
289 init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64); in cfg_queues_uld()
294 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids; in cfg_queues_uld()
296 dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n"); in cfg_queues_uld()
303 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64); in cfg_queues_uld()
308 adap->sge.uld_rxq_info[uld_type] = rxq_info; in cfg_queues_uld()
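cfg_queues_uld() keeps every queue count a multiple of the port count so the spreading above stays even, rounding the requested count with roundup(); setup_sge_txq_uld() uses the matching rounddown() for the crypto Tx case. Both kernel macros reduce to simple integer arithmetic, as this standalone sketch shows:

#include <stdio.h>

/* same shape as the kernel's integer roundup()/rounddown() */
#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))
#define rounddown(x, y)	(((x) / (y)) * (y))

int main(void)
{
	unsigned int nports = 4;

	/* e.g. 10 requested queues on a 4-port adapter */
	printf("roundup(10, %u)   = %u\n", nports, roundup(10u, nports));	/* 12 */
	printf("rounddown(10, %u) = %u\n", nports, rounddown(10u, nports));	/* 8 */
	return 0;
}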
313 static void free_queues_uld(struct adapter *adap, unsigned int uld_type) in free_queues_uld() argument
315 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in free_queues_uld()
317 adap->sge.uld_rxq_info[uld_type] = NULL; in free_queues_uld()
324 request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) in request_msix_queue_irqs_uld() argument
326 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in request_msix_queue_irqs_uld()
340 cxgb4_set_msix_aff(adap, minfo->vec, in request_msix_queue_irqs_uld()
349 cxgb4_free_msix_idx_in_bmap(adap, minfo->idx); in request_msix_queue_irqs_uld()
356 free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) in free_msix_queue_irqs_uld() argument
358 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in free_msix_queue_irqs_uld()
365 cxgb4_free_msix_idx_in_bmap(adap, minfo->idx); in free_msix_queue_irqs_uld()
370 static void enable_rx_uld(struct adapter *adap, unsigned int uld_type) in enable_rx_uld() argument
372 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in enable_rx_uld()
381 cxgb4_enable_rx(adap, q); in enable_rx_uld()
385 static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type) in quiesce_rx_uld() argument
387 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in quiesce_rx_uld()
401 free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info) in free_sge_txq_uld() argument
411 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, in free_sge_txq_uld()
413 free_tx_desc(adap, &txq->q, txq->q.in_use, false); in free_sge_txq_uld()
416 free_txq(adap, &txq->q); in free_sge_txq_uld()
422 alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info, in alloc_sge_txq_uld() argument
425 struct sge *s = &adap->sge; in alloc_sge_txq_uld()
429 j = nq / adap->params.nports; in alloc_sge_txq_uld()
434 err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j], in alloc_sge_txq_uld()
441 free_sge_txq_uld(adap, txq_info); in alloc_sge_txq_uld()
446 release_sge_txq_uld(struct adapter *adap, unsigned int uld_type) in release_sge_txq_uld() argument
451 txq_info = adap->sge.uld_txq_info[tx_uld_type]; in release_sge_txq_uld()
454 free_sge_txq_uld(adap, txq_info); in release_sge_txq_uld()
457 adap->sge.uld_txq_info[tx_uld_type] = NULL; in release_sge_txq_uld()
462 setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type, in setup_sge_txq_uld() argument
469 txq_info = adap->sge.uld_txq_info[tx_uld_type]; in setup_sge_txq_uld()
479 i = min_t(int, adap->vres.ncrypto_fc, in setup_sge_txq_uld()
481 txq_info->ntxq = rounddown(i, adap->params.nports); in setup_sge_txq_uld()
483 dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n"); in setup_sge_txq_uld()
490 txq_info->ntxq = roundup(i, adap->params.nports); in setup_sge_txq_uld()
499 if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) { in setup_sge_txq_uld()
506 adap->sge.uld_txq_info[tx_uld_type] = txq_info; in setup_sge_txq_uld()
510 static void uld_queue_init(struct adapter *adap, unsigned int uld_type, in uld_queue_init() argument
513 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in uld_queue_init()
515 struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type]; in uld_queue_init()
524 int t4_uld_mem_alloc(struct adapter *adap) in t4_uld_mem_alloc() argument
526 struct sge *s = &adap->sge; in t4_uld_mem_alloc()
528 adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL); in t4_uld_mem_alloc()
529 if (!adap->uld) in t4_uld_mem_alloc()
548 kfree(adap->uld); in t4_uld_mem_alloc()
552 void t4_uld_mem_free(struct adapter *adap) in t4_uld_mem_free() argument
554 struct sge *s = &adap->sge; in t4_uld_mem_free()
558 kfree(adap->uld); in t4_uld_mem_free()
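t4_uld_mem_alloc() sizes the per-ULD ops array with kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), ...) so every slot starts zeroed (an unused slot reads as NULL callbacks), and t4_uld_mem_free() is its exact inverse. A userspace calloc/free miniature of that pairing (toy types):

#include <stdlib.h>

#define ULD_MAX 6

struct uld_ops_toy { void *handle; };	/* stand-in for a uld info slot */

struct adap_toy { struct uld_ops_toy *uld; };

/* cf. t4_uld_mem_alloc(): zeroed array, one slot per ULD type */
static int uld_mem_alloc(struct adap_toy *adap)
{
	adap->uld = calloc(ULD_MAX, sizeof(*adap->uld));
	return adap->uld ? 0 : -1;
}

/* cf. t4_uld_mem_free() */
static void uld_mem_free(struct adap_toy *adap)
{
	free(adap->uld);
	adap->uld = NULL;
}

int main(void)
{
	struct adap_toy adap;

	if (uld_mem_alloc(&adap))
		return 1;
	uld_mem_free(&adap);
	return 0;
}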
562 static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type) in cxgb4_shutdown_uld_adapter() argument
564 if (adap->uld[type].handle) { in cxgb4_shutdown_uld_adapter()
565 adap->uld[type].handle = NULL; in cxgb4_shutdown_uld_adapter()
566 adap->uld[type].add = NULL; in cxgb4_shutdown_uld_adapter()
567 release_sge_txq_uld(adap, type); in cxgb4_shutdown_uld_adapter()
569 if (adap->flags & CXGB4_FULL_INIT_DONE) in cxgb4_shutdown_uld_adapter()
570 quiesce_rx_uld(adap, type); in cxgb4_shutdown_uld_adapter()
572 if (adap->flags & CXGB4_USING_MSIX) in cxgb4_shutdown_uld_adapter()
573 free_msix_queue_irqs_uld(adap, type); in cxgb4_shutdown_uld_adapter()
575 free_sge_queues_uld(adap, type); in cxgb4_shutdown_uld_adapter()
576 free_queues_uld(adap, type); in cxgb4_shutdown_uld_adapter()
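cxgb4_shutdown_uld_adapter() tears down in strict reverse order of setup: detach the handle, release the Tx queues, quiesce Rx, free the MSI-X vectors, then free the Rx queues and their bookkeeping, with the same CXGB4_FULL_INIT_DONE and CXGB4_USING_MSIX guards that gated the corresponding setup steps. A stub-function sketch of that mirrored ordering (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool full_init_done = true, using_msix = true;

static void release_txq(void)	{ puts("release tx queues"); }
static void quiesce_rx(void)	{ puts("quiesce rx"); }
static void free_irqs(void)	{ puts("free msix irqs"); }
static void free_rxq(void)	{ puts("free rx queues"); }
static void free_bookkeep(void)	{ puts("free queue bookkeeping"); }

/* each step is the inverse of a setup step, guarded by the same flags */
static void shutdown_uld(void)
{
	release_txq();
	if (full_init_done)
		quiesce_rx();
	if (using_msix)
		free_irqs();
	free_rxq();
	free_bookkeep();
}

int main(void)
{
	shutdown_uld();
	return 0;
}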
580 void t4_uld_clean_up(struct adapter *adap) in t4_uld_clean_up() argument
584 if (!is_uld(adap)) in t4_uld_clean_up()
589 if (!adap->uld[i].handle) in t4_uld_clean_up()
592 cxgb4_shutdown_uld_adapter(adap, i); in t4_uld_clean_up()
597 static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) in uld_init() argument
601 lld->pdev = adap->pdev; in uld_init()
602 lld->pf = adap->pf; in uld_init()
603 lld->l2t = adap->l2t; in uld_init()
604 lld->tids = &adap->tids; in uld_init()
605 lld->ports = adap->port; in uld_init()
606 lld->vr = &adap->vres; in uld_init()
607 lld->mtus = adap->params.mtus; in uld_init()
608 lld->nchan = adap->params.nports; in uld_init()
609 lld->nports = adap->params.nports; in uld_init()
610 lld->wr_cred = adap->params.ofldq_wr_cred; in uld_init()
611 lld->crypto = adap->params.crypto; in uld_init()
612 lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A)); in uld_init()
613 lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A); in uld_init()
614 lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A); in uld_init()
615 lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A); in uld_init()
616 lld->iscsi_ppm = &adap->iscsi_ppm; in uld_init()
617 lld->adapter_type = adap->params.chip; in uld_init()
618 lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; in uld_init()
619 lld->udb_density = 1 << adap->params.sge.eq_qpp; in uld_init()
620 lld->ucq_density = 1 << adap->params.sge.iq_qpp; in uld_init()
621 lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10); in uld_init()
622 lld->filt_mode = adap->params.tp.vlan_pri_map; in uld_init()
626 lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A); in uld_init()
627 lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A); in uld_init()
628 lld->fw_vers = adap->params.fw_vers; in uld_init()
630 lld->sge_ingpadboundary = adap->sge.fl_align; in uld_init()
631 lld->sge_egrstatuspagesize = adap->sge.stat_len; in uld_init()
632 lld->sge_pktshift = adap->sge.pktshift; in uld_init()
633 lld->ulp_crypto = adap->params.crypto; in uld_init()
634 lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN; in uld_init()
635 lld->max_ordird_qp = adap->params.max_ordird_qp; in uld_init()
636 lld->max_ird_adapter = adap->params.max_ird_adapter; in uld_init()
637 lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; in uld_init()
638 lld->nodeid = dev_to_node(adap->pdev_dev); in uld_init()
639 lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support; in uld_init()
640 lld->write_w_imm_support = adap->params.write_w_imm_support; in uld_init()
641 lld->write_cmpl_support = adap->params.write_cmpl_support; in uld_init()
644 static int uld_attach(struct adapter *adap, unsigned int uld) in uld_attach() argument
649 uld_init(adap, &lli); in uld_attach()
650 uld_queue_init(adap, uld, &lli); in uld_attach()
652 handle = adap->uld[uld].add(&lli); in uld_attach()
654 dev_warn(adap->pdev_dev, in uld_attach()
656 adap->uld[uld].name, PTR_ERR(handle)); in uld_attach()
660 adap->uld[uld].handle = handle; in uld_attach()
663 if (adap->flags & CXGB4_FULL_INIT_DONE) in uld_attach()
664 adap->uld[uld].state_change(handle, CXGB4_STATE_UP); in uld_attach()
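uld_init() snapshots everything the upper-layer driver may touch into one flat struct cxgb4_lld_info (ports, credits, register windows, firmware capabilities), and uld_attach() hands that snapshot to the ULD's add() callback, keeping struct adapter itself private to cxgb4. A toy version of the snapshot-then-hand-off pattern (fields abbreviated, names hypothetical):

#include <stdio.h>

struct adapter_toy {	/* stand-in for struct adapter */
	unsigned int nports, wr_cred, cclk_khz;
};

struct lld_info {	/* stand-in for struct cxgb4_lld_info */
	unsigned int nports, wr_cred, cclk_ps;
};

/* copy out only what the ULD may see, converting units on the way
 * (cf. lld->cclk_ps = 1000000000 / cclk, kHz -> picoseconds/cycle) */
static void lld_fill(const struct adapter_toy *adap, struct lld_info *lld)
{
	lld->nports  = adap->nports;
	lld->wr_cred = adap->wr_cred;
	lld->cclk_ps = 1000000000u / adap->cclk_khz;
}

/* the ULD's add() callback: consumes the snapshot, returns its handle */
static void *uld_add(const struct lld_info *lld)
{
	static int uld_priv;

	printf("attach: %u ports, %u wr credits, %u ps/clk\n",
	       lld->nports, lld->wr_cred, lld->cclk_ps);
	return &uld_priv;
}

int main(void)
{
	struct adapter_toy adap = { .nports = 2, .wr_cred = 128,
				    .cclk_khz = 50000 };
	struct lld_info lld;
	void *handle;

	lld_fill(&adap, &lld);
	handle = uld_add(&lld);		/* cf. adap->uld[uld].add(&lli) */
	(void)handle;
	return 0;
}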
670 static bool cxgb4_uld_in_use(struct adapter *adap) in cxgb4_uld_in_use() argument
672 const struct tid_info *t = &adap->tids; in cxgb4_uld_in_use()
681 int cxgb4_set_ktls_feature(struct adapter *adap, bool enable) in cxgb4_set_ktls_feature() argument
691 if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) { in cxgb4_set_ktls_feature()
695 if (cxgb4_uld_in_use(adap)) { in cxgb4_set_ktls_feature()
696 dev_dbg(adap->pdev_dev, in cxgb4_set_ktls_feature()
700 ret = t4_set_params(adap, adap->mbox, adap->pf, in cxgb4_set_ktls_feature()
704 refcount_set(&adap->chcr_ktls.ktls_refcount, 1); in cxgb4_set_ktls_feature()
708 refcount_inc(&adap->chcr_ktls.ktls_refcount); in cxgb4_set_ktls_feature()
712 if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) in cxgb4_set_ktls_feature()
717 if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) { in cxgb4_set_ktls_feature()
718 ret = t4_set_params(adap, adap->mbox, adap->pf, in cxgb4_set_ktls_feature()
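cxgb4_set_ktls_feature() gates a global hardware mode behind a reference count: only the 0-to-1 enable and the 1-to-0 disable issue the firmware call (t4_set_params), every other caller just adjusts the count, and enabling is refused while offload connections are in use. A single-threaded userspace model of that gate (plain int standing in for refcount_t):

#include <stdbool.h>
#include <stdio.h>

static unsigned int ktls_refcount;

static bool uld_in_use(void) { return false; }	/* cf. cxgb4_uld_in_use() */

static void fw_set_ktls(bool on)		/* cf. t4_set_params() */
{
	printf("firmware: ktls %s\n", on ? "on" : "off");
}

static int set_ktls_feature(bool enable)
{
	if (enable) {
		if (!ktls_refcount) {
			if (uld_in_use())
				return -1;	/* offload still active */
			fw_set_ktls(true);	/* only on 0 -> 1 */
		}
		ktls_refcount++;
	} else {
		if (!ktls_refcount)
			return -1;		/* unbalanced disable */
		if (--ktls_refcount == 0)
			fw_set_ktls(false);	/* only on 1 -> 0 */
	}
	return 0;
}

int main(void)
{
	set_ktls_feature(true);		/* programs the hardware */
	set_ktls_feature(true);		/* just bumps the count */
	set_ktls_feature(false);	/* 2 -> 1, no firmware call */
	set_ktls_feature(false);	/* 1 -> 0, deprograms */
	return 0;
}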
730 static void cxgb4_uld_alloc_resources(struct adapter *adap, in cxgb4_uld_alloc_resources() argument
736 if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) || in cxgb4_uld_alloc_resources()
737 (type != CXGB4_ULD_CRYPTO && !is_offload(adap))) in cxgb4_uld_alloc_resources()
739 if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) in cxgb4_uld_alloc_resources()
741 ret = cfg_queues_uld(adap, type, p); in cxgb4_uld_alloc_resources()
744 ret = setup_sge_queues_uld(adap, type, p->lro); in cxgb4_uld_alloc_resources()
747 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_uld_alloc_resources()
748 ret = request_msix_queue_irqs_uld(adap, type); in cxgb4_uld_alloc_resources()
752 if (adap->flags & CXGB4_FULL_INIT_DONE) in cxgb4_uld_alloc_resources()
753 enable_rx_uld(adap, type); in cxgb4_uld_alloc_resources()
754 if (adap->uld[type].add) in cxgb4_uld_alloc_resources()
756 ret = setup_sge_txq_uld(adap, type, p); in cxgb4_uld_alloc_resources()
759 adap->uld[type] = *p; in cxgb4_uld_alloc_resources()
760 ret = uld_attach(adap, type); in cxgb4_uld_alloc_resources()
765 release_sge_txq_uld(adap, type); in cxgb4_uld_alloc_resources()
767 if (adap->flags & CXGB4_FULL_INIT_DONE) in cxgb4_uld_alloc_resources()
768 quiesce_rx_uld(adap, type); in cxgb4_uld_alloc_resources()
769 if (adap->flags & CXGB4_USING_MSIX) in cxgb4_uld_alloc_resources()
770 free_msix_queue_irqs_uld(adap, type); in cxgb4_uld_alloc_resources()
772 free_sge_queues_uld(adap, type); in cxgb4_uld_alloc_resources()
774 free_queues_uld(adap, type); in cxgb4_uld_alloc_resources()
776 dev_warn(adap->pdev_dev, in cxgb4_uld_alloc_resources()
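cxgb4_uld_alloc_resources() is a classic goto-unwind ladder: each setup step that fails jumps to a label that releases everything acquired so far, newest first, ending in the dev_warn above. A compilable miniature of the idiom (stub steps, hypothetical names):

#include <stdio.h>

static int  cfg_queues(void)	{ puts("cfg queues");	return 0; }
static int  setup_rxq(void)	{ puts("setup rxq");	return 0; }
static int  request_irqs(void)	{ puts("request irqs");	return -1; }	/* simulated failure */
static void free_rxq(void)	{ puts("free rxq"); }
static void free_queues(void)	{ puts("free queues"); }

static int alloc_resources(void)
{
	int ret;

	ret = cfg_queues();
	if (ret)
		goto out;
	ret = setup_rxq();
	if (ret)
		goto err_free_queues;
	ret = request_irqs();
	if (ret)
		goto err_free_rxq;	/* unwind in reverse order */
	return 0;

err_free_rxq:
	free_rxq();
err_free_queues:
	free_queues();
out:
	return ret;
}

int main(void)
{
	if (alloc_resources())
		fprintf(stderr, "ULD resource allocation failed\n");
	return 0;
}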
780 void cxgb4_uld_enable(struct adapter *adap) in cxgb4_uld_enable() argument
785 list_add_tail(&adap->list_node, &adapter_list); in cxgb4_uld_enable()
787 cxgb4_uld_alloc_resources(adap, uld_entry->uld_type, in cxgb4_uld_enable()
803 struct adapter *adap; in cxgb4_register_uld() local
814 list_for_each_entry(adap, &adapter_list, list_node) in cxgb4_register_uld()
815 cxgb4_uld_alloc_resources(adap, type, p); in cxgb4_register_uld()
833 struct adapter *adap; in cxgb4_unregister_uld() local
839 list_for_each_entry(adap, &adapter_list, list_node) { in cxgb4_unregister_uld()
840 if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) || in cxgb4_unregister_uld()
841 (type != CXGB4_ULD_CRYPTO && !is_offload(adap))) in cxgb4_unregister_uld()
843 if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) in cxgb4_unregister_uld()
846 cxgb4_shutdown_uld_adapter(adap, type); in cxgb4_unregister_uld()
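cxgb4_register_uld() and cxgb4_unregister_uld() decouple module load order: a ULD registers once and the core walks the global adapter_list, attaching (or shutting down) that ULD type on every adapter already probed, while cxgb4_uld_enable() covers the converse, attaching every already-registered ULD to a newly probed adapter. A list-walk sketch of the registration side (toy types, fixed-size arrays in place of the kernel lists):

#include <stdio.h>

#define MAX_ADAPTERS 2
#define MAX_ULDS     4

struct adapter_toy { const char *name; };

static struct adapter_toy adapter_list[MAX_ADAPTERS] = {
	{ "adap0" }, { "adap1" }
};
static const char *uld_registry[MAX_ULDS];	/* registered ULD names */

/* cf. cxgb4_register_uld(): record the ULD, then attach it to every
 * adapter that probed before this ULD module loaded */
static void register_uld(unsigned int type, const char *name)
{
	unsigned int i;

	uld_registry[type] = name;
	for (i = 0; i < MAX_ADAPTERS; i++)
		printf("attach %s to %s\n", name, adapter_list[i].name);
}

/* cf. cxgb4_unregister_uld(): shut the ULD down on every adapter */
static void unregister_uld(unsigned int type)
{
	unsigned int i;

	for (i = 0; i < MAX_ADAPTERS; i++)
		printf("shutdown %s on %s\n",
		       uld_registry[type], adapter_list[i].name);
	uld_registry[type] = NULL;
}

int main(void)
{
	register_uld(1, "rdma");
	unregister_uld(1);
	return 0;
}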