Searched for refs:pport (results 1 – 25 of 80), sorted by relevance

/linux-6.1.9/drivers/infiniband/hw/qib/
qib_init.c
146 ppd = dd->pport + (i % dd->num_pports); in qib_create_ctxts()
334 dd->pport[pidx].statusp = status_page; in init_pioavailregs()
457 dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS | in init_after_reset()
461 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS | in init_after_reset()
477 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB | in enable_chip()
529 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL); in init_piobuf_state()
531 dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH); in init_piobuf_state()
582 ppd = dd->pport + pidx; in qib_create_workqueues()
599 ppd = dd->pport + pidx; in qib_create_workqueues()
643 ppd = dd->pport + pidx; in qib_init()
[all …]
qib_sysfs.c
46 return &dd->pport[port_num - 1]; in qib_get_pportdata_kobj()
56 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; in hrtbt_enable_show()
66 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; in hrtbt_enable_store()
93 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; in loopback_store()
109 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; in led_override_store()
128 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; in status_show()
159 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; in status_str_show()
314 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; in sl2vl_attr_show()
382 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; in diagc_attr_show()
394 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; in diagc_attr_store()
[all …]
qib_tx.c
71 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i)); in qib_disarm_piobufs()
120 ppd = dd->pport + pidx; in is_sdma_buf()
203 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i)); in qib_disarm_piobufs_set()
370 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n)); in qib_sendbuf_done()
512 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_force_pio_avail_update()
qib_iba7220.c
773 qib_disarm_7220_senderrbufs(dd->pport); in qib_7220_txe_recover()
1038 qib_decode_7220_sdma_errs(dd->pport, err, buf, blen); in qib_decode_7220_err()
1099 struct qib_pportdata *ppd = dd->pport; in handle_7220_errors()
1200 *dd->pport->statusp &= ~QIB_STATUS_IB_CONF; in handle_7220_errors()
1263 qib_cancel_sends(dd->pport); in qib_7220_clear_freeze()
1992 sdma_7220_intr(dd->pport, istat); in qib_7220intr()
2082 dd->pport->cpspec->ibdeltainprog = 0; in qib_setup_7220_reset()
2083 dd->pport->cpspec->ibsymdelta = 0; in qib_setup_7220_reset()
2084 dd->pport->cpspec->iblnkerrdelta = 0; in qib_setup_7220_reset()
2134 if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) in qib_setup_7220_reset()
[all …]
qib_mad.c
210 struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data; in qib_cap_mask_chg()
288 dd->pport[pidx].guid == 0) in subn_get_nodeinfo()
291 nip->port_guid = dd->pport[pidx].guid; in subn_get_nodeinfo()
299 nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */ in subn_get_nodeinfo()
326 struct qib_pportdata *ppd = dd->pport + pidx; in subn_get_guidinfo()
486 ppd = dd->pport + (port_num - 1); in subn_get_portinfo()
583 struct qib_pportdata *ppd = dd->pport + port - 1; in get_pkeys()
631 struct qib_pportdata *ppd = dd->pport + pidx; in subn_set_guidinfo()
685 ppd = dd->pport + (port_num - 1); in subn_set_portinfo()
1001 ppd = dd->pport + (port - 1); in set_pkeys()
[all …]
qib_iba7322.c
1654 qib_disarm_7322_senderrbufs(dd->pport); in handle_7322_errors()
1660 qib_disarm_7322_senderrbufs(dd->pport); in handle_7322_errors()
1688 if (dd->pport[pidx].link_speed_supported) in handle_7322_errors()
1689 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF; in handle_7322_errors()
2033 if (dd->pport[pidx].link_speed_supported) in qib_7322_clear_freeze()
2034 qib_write_kreg_port(dd->pport + pidx, krp_errmask, in qib_7322_clear_freeze()
2055 if (!dd->pport[pidx].link_speed_supported) in qib_7322_clear_freeze()
2057 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull); in qib_7322_clear_freeze()
2058 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull); in qib_7322_clear_freeze()
2147 struct qib_pportdata *ppd = dd->pport; in qib_7322_handle_hwerrors()
[all …]
qib_qp.c
221 struct qib_ibport *ibp = &dd->pport[n].ibport_data; in qib_free_all_qps()
302 if (mtu > dd->pport[pidx].ibmtu) in qib_get_pmtu_from_attr()
303 pmtu = mtu_to_enum(dd->pport[pidx].ibmtu); in qib_get_pmtu_from_attr()
qib_verbs.c
911 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1; in qib_verbs_send_pio()
1196 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; in qib_query_port()
1254 struct qib_ibport *ibp = &dd->pport[i].ibport_data; in qib_modify_device()
1264 struct qib_ibport *ibp = &dd->pport[i].ibport_data; in qib_modify_device()
1280 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; in qib_shut_down_port()
1501 struct qib_pportdata *ppd = dd->pport; in qib_register_ib_device()
1608 ppd = dd->pport; in qib_register_ib_device()
1667 if (dd->pport->sdma_descq_cnt) in qib_unregister_ib_device()
1669 dd->pport->sdma_descq_cnt * in qib_unregister_ib_device()
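
In the qib driver, pport is a per-device array of struct qib_pportdata: 0-based loops index it as dd->pport + pidx, while sysfs and MAD code translate the 1-based IB port number with &dd->pport[port_num - 1]. The hfi1 hits further down follow the same layout. A minimal user-space sketch of that indexing, with simplified stand-in structs rather than the real qib.h definitions:

#include <stdio.h>

struct qib_pportdata { int hw_pidx; };          /* stand-in per-port data */
struct qib_devdata {
	struct qib_pportdata *pport;            /* one entry per IB port */
	unsigned int num_pports;
};

/* 1-based port number (sysfs, MAD) to per-port data, as in qib_get_pportdata_kobj() */
static struct qib_pportdata *port_from_num(struct qib_devdata *dd, unsigned int port_num)
{
	return &dd->pport[port_num - 1];
}

int main(void)
{
	struct qib_pportdata ports[2] = { { .hw_pidx = 0 }, { .hw_pidx = 1 } };
	struct qib_devdata dd = { .pport = ports, .num_pports = 2 };

	for (unsigned int pidx = 0; pidx < dd.num_pports; pidx++)   /* 0-based loop */
		printf("pidx %u -> hw_pidx %d\n", pidx, (dd.pport + pidx)->hw_pidx);
	printf("port_num 2 -> hw_pidx %d\n", port_from_num(&dd, 2)->hw_pidx);
	return 0;
}
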
/linux-6.1.9/drivers/scsi/lpfc/
lpfc_logmsg.h
64 { uint32_t log_verbose = (phba)->pport ? \
65 (phba)->pport->cfg_log_verbose : \
88 { uint32_t log_verbose = (phba)->pport ? \
89 (phba)->pport->cfg_log_verbose : \
lpfc_vport.c
296 struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata; in lpfc_vport_create() local
297 struct lpfc_hba *phba = pport->phba; in lpfc_vport_create()
401 vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth; in lpfc_vport_create()
414 vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask; in lpfc_vport_create()
415 vport->fdmi_port_mask = phba->pport->fdmi_port_mask; in lpfc_vport_create()
423 (pport->fc_flag & FC_VFI_REGISTERED)) { in lpfc_vport_create()
445 (pport->port_state < LPFC_FABRIC_CFG_LINK) || in lpfc_vport_create()
461 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); in lpfc_vport_create()
609 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); in enable_vport()
656 !(phba->pport->load_flag & FC_UNLOADING)) { in lpfc_vport_delete()
[all …]
lpfc_vmid.c
114 if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) in lpfc_vmid_update_entry()
140 lpfc_get_vmid_from_hashtable(vport->phba->pport, hash, in lpfc_vmid_assign_cs_ctl()
242 if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) in lpfc_vmid_get_appid()
259 if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) in lpfc_vmid_get_appid()
278 if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) { in lpfc_vmid_get_appid()
282 vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD; in lpfc_vmid_get_appid()
lpfc_hbadisc.c
361 vport = phba->pport; in lpfc_check_inactive_vmid()
573 vport = phba->pport; in lpfc_check_vmid_qfpa_issue()
863 = (phba->pport->stopped) in lpfc_work_list_done()
873 if (!(phba->pport->load_flag & FC_UNLOADING)) in lpfc_work_list_done()
925 if (phba->pport->work_port_events & in lpfc_work_done()
928 phba->pport->work_port_events &= in lpfc_work_done()
931 if (phba->pport->work_port_events & in lpfc_work_done()
934 phba->pport->work_port_events &= in lpfc_work_done()
965 vport = phba->pport; in lpfc_work_done()
1216 struct lpfc_vport *vport = phba->pport; in lpfc_linkdown()
[all …]
lpfc_init.c
418 struct lpfc_vport *vport = phba->pport; in lpfc_config_port_post()
636 pmb->vport = phba->pport; in lpfc_config_port_post()
656 pmb->vport = phba->pport; in lpfc_config_port_post()
758 struct lpfc_vport *vport = phba->pport; in lpfc_hba_init_link_fc_topology()
892 if (phba->pport->load_flag & FC_UNLOADING) in lpfc_hba_down_prep()
893 lpfc_cleanup_discovery_resources(phba->pport); in lpfc_hba_down_prep()
1200 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); in lpfc_hb_timeout()
1201 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; in lpfc_hb_timeout()
1203 phba->pport->work_port_events |= WORKER_HB_TMO; in lpfc_hb_timeout()
1204 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); in lpfc_hb_timeout()
[all …]
lpfc_nvmet.c
875 if (phba->pport->load_flag & FC_UNLOADING) in __lpfc_nvme_xmt_ls_rsp()
987 if (axchg->phba->pport->load_flag & FC_UNLOADING) in lpfc_nvmet_xmt_ls_rsp()
1025 if (phba->pport->load_flag & FC_UNLOADING) { in lpfc_nvmet_xmt_fcp_op()
1148 if (phba->pport->load_flag & FC_UNLOADING) in lpfc_nvmet_xmt_fcp_abort()
1320 if (phba->pport->load_flag & FC_UNLOADING) in lpfc_nvmet_ls_req()
1329 ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq, in lpfc_nvmet_ls_req()
1356 if (phba->pport->load_flag & FC_UNLOADING) in lpfc_nvmet_ls_abort()
1361 ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq); in lpfc_nvmet_ls_abort()
1399 rc = lpfc_issue_els_rscn(phba->pport, 0); in lpfc_nvmet_discovery_event()
1645 struct lpfc_vport *vport = phba->pport; in lpfc_nvmet_create_targetport()
[all …]
lpfc_sli.c
1039 (!(phba->pport->load_flag & FC_UNLOADING))) in lpfc_handle_rrq_active()
1183 if (phba->pport->load_flag & FC_UNLOADING) { in lpfc_set_rrq_active()
2003 sync_buf->vport = phba->pport; in lpfc_issue_cmf_sync_wqe()
2869 if (!(phba->pport->load_flag & FC_UNLOADING) && in lpfc_sli_def_mbox_cmpl()
2891 !(phba->pport->load_flag & FC_UNLOADING) && in lpfc_sli_def_mbox_cmpl()
3067 lpfc_debugfs_disc_trc(phba->pport, in lpfc_sli_handle_mb_event()
3220 if (phba->pport->load_flag & FC_UNLOADING) { in lpfc_nvme_unsol_ls_handler()
3224 } else if (!phba->nvmet_support && !phba->pport->localport) { in lpfc_nvme_unsol_ls_handler()
3247 ndlp = lpfc_findnode_did(phba->pport, sid); in lpfc_nvme_unsol_ls_handler()
3444 saveq->vport = phba->pport; in lpfc_sli_process_unsol_iocb()
[all …]
lpfc_bsg.c
463 cmdiocbq->vport = phba->pport; in lpfc_bsg_send_mgmt_cmd()
1456 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); in lpfc_issue_ct_rsp()
1513 ctiocb->vport = phba->pport; in lpfc_issue_ct_rsp()
1673 shost = lpfc_shost_from_vport(phba->pport); in lpfc_bsg_diag_mode_enter()
1715 shost = lpfc_shost_from_vport(phba->pport); in lpfc_bsg_diag_mode_exit()
1980 if (phba->pport->fc_flag & FC_VFI_REGISTERED) { in lpfc_sli4_diag_fcport_reg_setup()
1984 phba->pport->fc_myDID, phba->fcf.fcfi, in lpfc_sli4_diag_fcport_reg_setup()
1985 phba->sli4_hba.vfi_ids[phba->pport->vfi], in lpfc_sli4_diag_fcport_reg_setup()
1986 phba->vpi_ids[phba->pport->vpi]); in lpfc_sli4_diag_fcport_reg_setup()
1989 return lpfc_issue_reg_vfi(phba->pport); in lpfc_sli4_diag_fcport_reg_setup()
[all …]
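
Throughout lpfc, phba->pport points at the physical port's struct lpfc_vport (as opposed to the NPIV vports created later), and many of the hits above are guards that bail out when that port is unloading. A minimal sketch of the guard pattern, with stand-in types and flag values:

#include <stdbool.h>
#include <stdio.h>

#define FC_UNLOADING 0x1                        /* stand-in flag value */

struct lpfc_vport { unsigned int load_flag; };  /* stand-in for the real vport */
struct lpfc_hba   { struct lpfc_vport *pport; };/* the physical port's vport */

static bool hba_unloading(const struct lpfc_hba *phba)
{
	return phba->pport && (phba->pport->load_flag & FC_UNLOADING);
}

int main(void)
{
	struct lpfc_vport phys_port = { .load_flag = FC_UNLOADING };
	struct lpfc_hba phba = { .pport = &phys_port };

	if (hba_unloading(&phba))
		printf("skip work: physical port is unloading\n");
	return 0;
}
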
/linux-6.1.9/drivers/char/
ppdev.c
122 struct parport *pport; in pp_read() local
138 pport = pp->pdev->port; in pp_read()
139 mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); in pp_read()
158 if (pport->ieee1284.mode & IEEE1284_ADDR) in pp_read()
159 fn = pport->ops->epp_read_addr; in pp_read()
161 fn = pport->ops->epp_read_data; in pp_read()
162 bytes_read = (*fn)(pport, kbuffer, need, flags); in pp_read()
164 bytes_read = parport_read(pport, kbuffer, need); in pp_read()
202 struct parport *pport; in pp_write() local
214 pport = pp->pdev->port; in pp_write()
[all …]
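
In ppdev, pport is just a local struct parport * pulled from pp->pdev->port, and pp_read() picks between the EPP address and data read callbacks based on the IEEE 1284 mode bits. A simplified stand-alone sketch of that dispatch (stand-in types, not the real parport API):

#include <stddef.h>
#include <stdio.h>

#define IEEE1284_ADDR 0x40                      /* stand-in mode bit */

struct parport;                                 /* simplified stand-ins */
struct parport_operations {
	size_t (*epp_read_addr)(struct parport *, void *, size_t, int);
	size_t (*epp_read_data)(struct parport *, void *, size_t, int);
};
struct parport {
	int ieee1284_mode;
	const struct parport_operations *ops;
};

static size_t read_addr(struct parport *p, void *b, size_t n, int f) { (void)p; (void)b; (void)f; return n; }
static size_t read_data(struct parport *p, void *b, size_t n, int f) { (void)p; (void)b; (void)f; return n; }

/* choose the EPP callback the way pp_read() does */
static size_t epp_read(struct parport *pport, void *buf, size_t need, int flags)
{
	size_t (*fn)(struct parport *, void *, size_t, int);

	fn = (pport->ieee1284_mode & IEEE1284_ADDR) ? pport->ops->epp_read_addr
						    : pport->ops->epp_read_data;
	return (*fn)(pport, buf, need, flags);
}

int main(void)
{
	const struct parport_operations ops = { read_addr, read_data };
	struct parport port = { .ieee1284_mode = 0, .ops = &ops };
	char buf[8];

	printf("read %zu bytes\n", epp_read(&port, buf, sizeof(buf), 0));
	return 0;
}
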
/linux-6.1.9/drivers/scsi/bfa/
bfad.c
244 bfad->pport.flags |= BFAD_PORT_DELETE; in bfad_sm_created()
440 port_drv = &bfad->pport; in bfa_fcb_lport_new()
893 bfad->pport.flags |= BFAD_PORT_DELETE; in bfad_fcs_stop()
924 if (bfad->pport.im_port == NULL) { in bfad_cfg_pport()
929 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port, in bfad_cfg_pport()
934 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM; in bfad_cfg_pport()
947 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) { in bfad_uncfg_pport()
948 bfad_im_scsi_host_free(bfad, bfad->pport.im_port); in bfad_uncfg_pport()
949 bfad_im_port_clean(bfad->pport.im_port); in bfad_uncfg_pport()
950 kfree(bfad->pport.im_port); in bfad_uncfg_pport()
[all …]
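
bfad differs from the other SCSI drivers here in that the base port is embedded in the driver instance rather than pointed to, so the hits read and write bfad->pport.flags, .roles and .im_port directly. A tiny sketch with stand-in types and flag values:

#include <stdio.h>

#define BFAD_PORT_DELETE      0x1               /* stand-in flag values */
#define BFA_LPORT_ROLE_FCP_IM 0x2

struct bfad_port { unsigned int flags, roles; };
struct bfad      { struct bfad_port pport; };   /* embedded, not a pointer */

int main(void)
{
	struct bfad bfad = { { 0, 0 } };

	bfad.pport.roles |= BFA_LPORT_ROLE_FCP_IM;  /* configure the base port */
	bfad.pport.flags |= BFAD_PORT_DELETE;       /* mark it for deletion */
	printf("flags=%#x roles=%#x\n", bfad.pport.flags, bfad.pport.roles);
	return 0;
}
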
/linux-6.1.9/drivers/infiniband/hw/hfi1/
init.c
143 ret = hfi1_create_kctxt(dd, dd->pport); in hfi1_create_kctxts()
744 ppd = dd->pport + pidx; in create_workqueues()
775 ppd = dd->pport + pidx; in create_workqueues()
798 ppd = dd->pport + pidx; in destroy_workqueues()
867 ppd = dd->pport + pidx; in hfi1_init()
918 ppd = dd->pport + pidx; in hfi1_init()
944 ppd = dd->pport + pidx; in hfi1_init()
987 ppd = dd->pport + pidx; in stop_timers()
1016 ppd = dd->pport + pidx; in shutdown_device()
1030 ppd = dd->pport + pidx; in shutdown_device()
[all …]
sysfs.c
19 return &dd->pport[port_num - 1]; in hfi1_get_pportdata_kobj()
108 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; in cc_prescan_show()
118 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; in cc_prescan_store()
249 struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data; in sl2sc_attr_show()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/
en_stats.c
842 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
852 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1003 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
1010 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1062 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
1069 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1203 data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, in MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS()
1211 MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, in MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS()
1217 MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, in MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS()
1225 struct mlx5e_pport_stats *pstats = &priv->stats.pport; in MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS()
[all …]
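
In mlx5, priv->stats.pport caches PPCNT register snapshots, and the hits are all MLX5E_READ_CTR64_BE() reads of 64-bit big-endian counters out of those snapshots. A rough user-space equivalent of one such read, assuming a glibc-style be64toh()/htobe64():

#include <endian.h>                             /* be64toh(), htobe64() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for one counter-group snapshot; counters are stored big-endian */
struct pport_stats_stub { uint64_t counters[4]; };

static uint64_t read_ctr64_be(const struct pport_stats_stub *s, size_t idx)
{
	uint64_t be;

	memcpy(&be, &s->counters[idx], sizeof(be));
	return be64toh(be);                     /* convert to host byte order */
}

int main(void)
{
	struct pport_stats_stub pport = { { htobe64(42) } };

	printf("counter[0] = %llu\n", (unsigned long long)read_ctr64_be(&pport, 0));
	return 0;
}
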
/linux-6.1.9/drivers/net/ethernet/rocker/
rocker_ofdpa.c
197 u32 pport; member
1198 __be16 vlan_id, bool ttl_check, u32 pport) in ofdpa_group_l3_unicast() argument
1213 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); in ofdpa_group_l3_unicast()
1321 ofdpa_port->pport); in ofdpa_port_ipv4_neigh()
1472 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); in ofdpa_port_vlan_flood_group()
1508 out_pport = ofdpa_port->pport; in ofdpa_port_vlan_l2_groups()
1597 u32 in_pport = ofdpa_port->pport; in ofdpa_port_ctrl_vlan_acl()
1660 err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask, in ofdpa_port_ctrl_vlan_term()
1730 u32 in_pport = ofdpa_port->pport; in ofdpa_port_vlan()
1848 u32 out_pport = ofdpa_port->pport; in ofdpa_port_fdb_learn()
[all …]
rocker_main.c
867 val |= 1ULL << rocker_port->pport; in rocker_port_set_enable()
869 val &= ~(1ULL << rocker_port->pport); in rocker_port_set_enable()
1108 rocker_port->pport)) in rocker_cmd_get_port_settings_prep()
1265 rocker_port->pport)) in rocker_cmd_set_port_settings_ethtool_prep()
1295 rocker_port->pport)) in rocker_cmd_set_port_settings_macaddr_prep()
1319 rocker_port->pport)) in rocker_cmd_set_port_settings_mtu_prep()
1343 rocker_port->pport)) in rocker_cmd_set_port_learning_prep()
2282 rocker_port->pport)) in rocker_cmd_get_port_stats_prep()
2298 u32 pport; in rocker_cmd_get_port_stats_ethtool_proc() local
2313 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); in rocker_cmd_get_port_stats_ethtool_proc()
[all …]
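
In rocker and its OF-DPA backend, pport is a plain u32 physical port number: rocker_port_set_enable() uses it as a bit position in an enable mask, and the OF-DPA code packs it into L2 interface group IDs. A sketch of both uses; the group-ID layout below is hypothetical, not the real ROCKER_GROUP_L2_INTERFACE() encoding:

#include <stdint.h>
#include <stdio.h>

/* hypothetical packing: 12-bit VLAN in the middle, pport in the low 16 bits */
#define GROUP_L2_INTERFACE(vlan_id, pport) \
	((((uint32_t)(vlan_id) & 0xfff) << 16) | ((uint32_t)(pport) & 0xffff))

int main(void)
{
	uint32_t pport = 3;                     /* physical port number */
	uint64_t enable = 0;

	enable |= 1ULL << pport;                /* enable this port */
	enable &= ~(1ULL << pport);             /* ...and disable it again */
	printf("enable=%#llx group=%#x\n",
	       (unsigned long long)enable, GROUP_L2_INTERFACE(100, pport));
	return 0;
}
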
/linux-6.1.9/drivers/scsi/csiostor/
csio_init.c
269 0, hw->pport[0].portid, false, NULL); in csio_create_queues()
279 hw->pport[0].portid, true, NULL); in csio_create_queues()
287 mgmtm->iq_idx, hw->pport[0].portid, NULL); in csio_create_queues()
991 ln->portid = hw->pport[i].portid; in csio_probe_one()
1136 ln->portid = hw->pport[i].portid; in csio_pci_resume()
csio_lnode.c
352 if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G) in csio_ln_fdmi_rhba_cbfn()
354 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G) in csio_ln_fdmi_rhba_cbfn()
356 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_25G) in csio_ln_fdmi_rhba_cbfn()
358 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_40G) in csio_ln_fdmi_rhba_cbfn()
360 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_50G) in csio_ln_fdmi_rhba_cbfn()
362 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_100G) in csio_ln_fdmi_rhba_cbfn()
768 if (hw->pport[i].portid == portid) in csio_fcoe_enable_link()
769 memcpy(hw->pport[i].mac, lcmd->phy_mac, 6); in csio_fcoe_enable_link()
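
csiostor keeps a small fixed pport[] array on the hw structure, each entry carrying a portid, link speed and MAC, and csio_fcoe_enable_link() locates the matching entry by portid before copying the PHY MAC into it. A minimal sketch of that lookup with stand-in types and array size:

#include <stdio.h>
#include <string.h>

#define MAX_PPORTS 4                            /* stand-in for the real limit */

struct csio_pport_stub { unsigned int portid; unsigned char mac[6]; };
struct csio_hw_stub    { struct csio_pport_stub pport[MAX_PPORTS]; };

/* find the per-port entry whose portid matches, NULL if none does */
static struct csio_pport_stub *find_pport(struct csio_hw_stub *hw, unsigned int portid)
{
	for (unsigned int i = 0; i < MAX_PPORTS; i++)
		if (hw->pport[i].portid == portid)
			return &hw->pport[i];
	return NULL;
}

int main(void)
{
	struct csio_hw_stub hw = { { { .portid = 0 }, { .portid = 1 } } };
	const unsigned char phy_mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct csio_pport_stub *p = find_pport(&hw, 1);

	if (p)
		memcpy(p->mac, phy_mac, sizeof(p->mac));
	printf("port 1 first mac byte: %#x\n", p ? p->mac[0] : 0);
	return 0;
}
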
