/linux-5.19.10/drivers/net/ethernet/intel/fm10k/

D | fm10k_mbx.c
     132  static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail)   in fm10k_mbx_index_len() argument
     140  return len & ((mbx->mbmem_len << 1) - 1);   in fm10k_mbx_index_len()
     151  static u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset)   in fm10k_mbx_tail_add() argument
     153  u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1);   in fm10k_mbx_tail_add()
     156  return (tail > mbx->tail) ? --tail : ++tail;   in fm10k_mbx_tail_add()
     167  static u16 fm10k_mbx_tail_sub(struct fm10k_mbx_info *mbx, u16 offset)   in fm10k_mbx_tail_sub() argument
     169  u16 tail = (mbx->tail - offset - 1) & ((mbx->mbmem_len << 1) - 1);   in fm10k_mbx_tail_sub()
     172  return (tail < mbx->tail) ? ++tail : --tail;   in fm10k_mbx_tail_sub()
     183  static u16 fm10k_mbx_head_add(struct fm10k_mbx_info *mbx, u16 offset)   in fm10k_mbx_head_add() argument
     185  u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1);   in fm10k_mbx_head_add()
     [all …]
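The fm10k hits above all revolve around one piece of index arithmetic:
head and tail live in an index space twice the mailbox length, so the
driver masks with (mbmem_len << 1) - 1 and then steps the result by one
to skip values the protocol cannot use. A minimal standalone sketch of
that wrap, assuming a power-of-two ring; the names are illustrative,
not the driver's:

#include <stdint.h>

/* Advance a tail index by "offset" entries, mirroring the shape of
 * fm10k_mbx_tail_add(): mask into the doubled index space, then nudge
 * the result by one so it never lands on a reserved value.
 */
static uint16_t tail_add(uint16_t tail, uint16_t offset, uint16_t ring_len)
{
    uint16_t t = (tail + offset + 1) & ((ring_len << 1) - 1);

    /* step back if the masked sum moved forward, forward if it wrapped */
    return (t > tail) ? --t : ++t;
}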
D | fm10k_vf.c
     169  struct fm10k_mbx_info *mbx = &hw->mbx;   in fm10k_update_vlan_vf() local
     189  return mbx->ops.enqueue_tx(hw, mbx, msg);   in fm10k_update_vlan_vf()
     201  struct fm10k_mbx_info __always_unused *mbx)   in fm10k_msg_mac_vlan_vf() argument
     275  struct fm10k_mbx_info *mbx = &hw->mbx;   in fm10k_update_uc_addr_vf() local
     300  return mbx->ops.enqueue_tx(hw, mbx, msg);   in fm10k_update_uc_addr_vf()
     318  struct fm10k_mbx_info *mbx = &hw->mbx;   in fm10k_update_mc_addr_vf() local
     339  return mbx->ops.enqueue_tx(hw, mbx, msg);   in fm10k_update_mc_addr_vf()
     351  struct fm10k_mbx_info *mbx = &hw->mbx;   in fm10k_update_int_moderator_vf() local
     358  mbx->ops.enqueue_tx(hw, mbx, msg);   in fm10k_update_int_moderator_vf()
     379  struct fm10k_mbx_info __always_unused *mbx)   in fm10k_msg_lport_state_vf() argument
     [all …]
D | fm10k_iov.c
     9    struct fm10k_mbx_info *mbx)   in fm10k_iov_msg_error() argument
     11   struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;   in fm10k_iov_msg_error()
     18   return fm10k_tlv_msg_error(hw, results, mbx);   in fm10k_iov_msg_error()
     34   struct fm10k_mbx_info *mbx)   in fm10k_iov_msg_queue_mac_vlan() argument
     36   struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;   in fm10k_iov_msg_queue_mac_vlan()
     188  vf_info->mbx.ops.connect(hw, &vf_info->mbx);   in fm10k_iov_event()
     228  struct fm10k_mbx_info *mbx = &vf_info->mbx;   in fm10k_iov_mbx() local
     232  hw->mbx.ops.process(hw, &hw->mbx);   in fm10k_iov_mbx()
     241  if (!mbx->timeout) {   in fm10k_iov_mbx()
     243  mbx->ops.connect(hw, mbx);   in fm10k_iov_mbx()
     [all …]
D | fm10k_pf.c
     307  struct fm10k_mbx_info *mbx = &hw->mbx;   in fm10k_update_xc_addr_pf() local
     336  return mbx->ops.enqueue_tx(hw, mbx, msg);   in fm10k_update_xc_addr_pf()
     394  struct fm10k_mbx_info *mbx = &hw->mbx;   in fm10k_update_xcast_mode_pf() local
     415  return mbx->ops.enqueue_tx(hw, mbx, msg);   in fm10k_update_xcast_mode_pf()
     462  struct fm10k_mbx_info *mbx = &hw->mbx;   in fm10k_update_lport_state_pf() local
     486  return mbx->ops.enqueue_tx(hw, mbx, msg);   in fm10k_update_lport_state_pf()
     877  if (vf_info->mbx.ops.enqueue_tx) {   in fm10k_iov_assign_default_mac_vlan_pf()
     878  err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);   in fm10k_iov_assign_default_mac_vlan_pf()
     960  vf_info->mbx.timeout = 0;   in fm10k_iov_reset_resources_pf()
     961  if (vf_info->mbx.ops.disconnect)   in fm10k_iov_reset_resources_pf()
     [all …]
D | fm10k_pci.c
     802   if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) {   in fm10k_macvlan_task()
     803   hw->mbx.ops.process(hw, &hw->mbx);   in fm10k_macvlan_task()
     1198  struct fm10k_mbx_info *mbx = &hw->mbx;   in fm10k_msix_mbx_vf() local
     1207  mbx->ops.process(hw, mbx);   in fm10k_msix_mbx_vf()
     1299  vf_info->mbx.ops.connect(hw, &vf_info->mbx);   in fm10k_handle_fault()
     1371  struct fm10k_mbx_info *mbx = &hw->mbx;   in fm10k_msix_mbx_pf() local
     1388  s32 err = mbx->ops.process(hw, mbx);   in fm10k_msix_mbx_pf()
     1435  hw->mbx.ops.disconnect(hw, &hw->mbx);   in fm10k_mbx_free_irq()
     1459  struct fm10k_mbx_info *mbx)   in fm10k_mbx_mac_addr() argument
     1466  err = fm10k_msg_mac_vlan_vf(hw, results, mbx);   in fm10k_mbx_mac_addr()
     [all …]
D | fm10k_common.c
     476  struct fm10k_mbx_info *mbx = &hw->mbx;   in fm10k_get_host_state_generic() local
     482  mbx->ops.process(hw, mbx);   in fm10k_get_host_state_generic()
     499  if (!mbx->timeout) {   in fm10k_get_host_state_generic()
     505  if (mbx->state != FM10K_STATE_OPEN)   in fm10k_get_host_state_generic()
D | fm10k_tlv.c
     545  struct fm10k_mbx_info *mbx,   in fm10k_tlv_msg_parse() argument
     577  return data->func(hw, results, mbx);   in fm10k_tlv_msg_parse()
     592  struct fm10k_mbx_info __always_unused *mbx)   in fm10k_tlv_msg_error() argument
     708  struct fm10k_mbx_info *mbx)   in fm10k_tlv_msg_test() argument
     729  &mbx->test_result);   in fm10k_tlv_msg_test()
     849  return mbx->ops.enqueue_tx(hw, mbx, reply);   in fm10k_tlv_msg_test()
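fm10k_tlv_msg_parse() above walks a handler table and hands the decoded
results, together with the mailbox the message arrived on, to the
matching entry's func pointer. A sketch of that dispatch step, with
illustrative types standing in for the driver's fm10k_msg_data:

#include <linux/types.h>

struct msg_handler {
    u32 id;                            /* message identifier */
    s32 (*func)(void *hw, u32 **results, void *mbx);
};

/* look the message id up in the table and invoke its handler,
 * passing along the parse results and the originating mailbox */
static s32 dispatch_msg(const struct msg_handler *tbl, size_t n, u32 id,
                        void *hw, u32 **results, void *mbx)
{
    size_t i;

    for (i = 0; i < n; i++)
        if (tbl[i].id == id)
            return tbl[i].func(hw, results, mbx);

    return -1;                         /* no handler registered for this id */
}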
/linux-5.19.10/drivers/net/ethernet/intel/igb/

D | e1000_mbx.c
     19  struct e1000_mbx_info *mbx = &hw->mbx;   in igb_read_mbx() local
     23  if (size > mbx->size)   in igb_read_mbx()
     24  size = mbx->size;   in igb_read_mbx()
     26  if (mbx->ops.read)   in igb_read_mbx()
     27  ret_val = mbx->ops.read(hw, msg, size, mbx_id, unlock);   in igb_read_mbx()
     43  struct e1000_mbx_info *mbx = &hw->mbx;   in igb_write_mbx() local
     46  if (size > mbx->size)   in igb_write_mbx()
     49  else if (mbx->ops.write)   in igb_write_mbx()
     50  ret_val = mbx->ops.write(hw, msg, size, mbx_id);   in igb_write_mbx()
     64  struct e1000_mbx_info *mbx = &hw->mbx;   in igb_check_for_msg() local
     [all …]
/linux-5.19.10/drivers/net/ethernet/intel/ixgbe/

D | ixgbe_mbx.c
     20  struct ixgbe_mbx_info *mbx = &hw->mbx;   in ixgbe_read_mbx() local
     23  if (size > mbx->size)   in ixgbe_read_mbx()
     24  size = mbx->size;   in ixgbe_read_mbx()
     26  if (!mbx->ops)   in ixgbe_read_mbx()
     29  return mbx->ops->read(hw, msg, size, mbx_id);   in ixgbe_read_mbx()
     43  struct ixgbe_mbx_info *mbx = &hw->mbx;   in ixgbe_write_mbx() local
     45  if (size > mbx->size)   in ixgbe_write_mbx()
     48  if (!mbx->ops)   in ixgbe_write_mbx()
     51  return mbx->ops->write(hw, msg, size, mbx_id);   in ixgbe_write_mbx()
     63  struct ixgbe_mbx_info *mbx = &hw->mbx;   in ixgbe_check_for_msg() local
     [all …]
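igb_read_mbx() and ixgbe_read_mbx() above share a wrapper shape: clamp
the requested length to the mailbox size, then forward through the ops
table (an embedded struct in igb, a pointer in ixgbe, hence the
!mbx->ops check). A hedged sketch of that pattern with invented names,
not the drivers' real types:

struct mbx_info {
    unsigned int size;                 /* longest message the channel carries */
    int (*read)(void *hw, unsigned int *msg, unsigned int size, int mbx_id);
};

static int mbx_read(void *hw, struct mbx_info *mbx, unsigned int *msg,
                    unsigned int size, int mbx_id)
{
    if (size > mbx->size)
        size = mbx->size;              /* never read past the mailbox */

    if (!mbx->read)
        return -1;                     /* backend not wired up */

    return mbx->read(hw, msg, size, mbx_id);
}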
/linux-5.19.10/drivers/net/ethernet/intel/ixgbevf/

D | mbx.c
     15  struct ixgbe_mbx_info *mbx = &hw->mbx;   in ixgbevf_poll_for_msg() local
     16  int countdown = mbx->timeout;   in ixgbevf_poll_for_msg()
     18  if (!countdown || !mbx->ops.check_for_msg)   in ixgbevf_poll_for_msg()
     21  while (countdown && mbx->ops.check_for_msg(hw)) {   in ixgbevf_poll_for_msg()
     23  udelay(mbx->udelay);   in ixgbevf_poll_for_msg()
     37  struct ixgbe_mbx_info *mbx = &hw->mbx;   in ixgbevf_poll_for_ack() local
     38  int countdown = mbx->timeout;   in ixgbevf_poll_for_ack()
     40  if (!countdown || !mbx->ops.check_for_ack)   in ixgbevf_poll_for_ack()
     43  while (countdown && mbx->ops.check_for_ack(hw)) {   in ixgbevf_poll_for_ack()
     45  udelay(mbx->udelay);   in ixgbevf_poll_for_ack()
     [all …]
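Both poll helpers above (and their igbvf twins further down) are the
same bounded loop: bail out if there is no countdown budget or no
check callback, otherwise probe again every mbx->udelay microseconds
until the callback succeeds or the budget runs out. A sketch keeping
the drivers' callback convention (nonzero means "nothing yet"); the
names are illustrative:

#include <linux/delay.h>

static int poll_for_event(int (*check)(void *hw), void *hw,
                          int countdown, unsigned int step_us)
{
    if (!countdown || !check)
        return -1;

    while (countdown && check(hw)) {
        countdown--;
        udelay(step_us);               /* busy-wait between probes */
    }

    return countdown ? 0 : -1;         /* -1 on timeout */
}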
D | vf.c
     66   struct ixgbe_mbx_info *mbx = &hw->mbx;   in ixgbevf_reset_hw_vf() local
     77   hw->mbx.ops.init_params(hw);   in ixgbevf_reset_hw_vf()
     78   memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,   in ixgbevf_reset_hw_vf()
     85   while (!mbx->ops.check_for_rst(hw) && timeout) {   in ixgbevf_reset_hw_vf()
     94   mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;   in ixgbevf_reset_hw_vf()
     705  struct ixgbe_mbx_info *mbx = &hw->mbx;   in ixgbevf_check_mac_link_vf() local
     712  if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)   in ixgbevf_check_mac_link_vf()
     753  if (mbx->ops.read(hw, &in_msg, 1)) {   in ixgbevf_check_mac_link_vf()
     767  if (!mbx->timeout) {   in ixgbevf_check_mac_link_vf()
     796  struct ixgbe_mbx_info *mbx = &hw->mbx;   in ixgbevf_hv_check_mac_link_vf() local
     [all …]
/linux-5.19.10/drivers/crypto/cavium/cpt/

D | cptvf_mbox.c
     8   static void cptvf_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx)   in cptvf_send_msg_to_pf() argument
     12  mbx->msg);   in cptvf_send_msg_to_pf()
     14  mbx->data);   in cptvf_send_msg_to_pf()
     20  struct cpt_mbox mbx = {};   in cptvf_handle_mbox_intr() local
     26  mbx.msg = cpt_read_csr64(cptvf->reg_base, CPTX_VFX_PF_MBOXX(0, 0, 0));   in cptvf_handle_mbox_intr()
     27  mbx.data = cpt_read_csr64(cptvf->reg_base, CPTX_VFX_PF_MBOXX(0, 0, 1));   in cptvf_handle_mbox_intr()
     29  __func__, mbx.msg);   in cptvf_handle_mbox_intr()
     30  switch (mbx.msg) {   in cptvf_handle_mbox_intr()
     34  cptvf->vfid = mbx.data;   in cptvf_handle_mbox_intr()
     40  cptvf->vftype = mbx.data;   in cptvf_handle_mbox_intr()
     [all …]
D | cptpf_mbox.c
     9   struct cpt_mbox *mbx)   in cpt_send_msg_to_vf() argument
     13  mbx->data);   in cpt_send_msg_to_vf()
     14  cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0), mbx->msg);   in cpt_send_msg_to_vf()
     21  struct cpt_mbox *mbx)   in cpt_mbox_send_ack() argument
     23  mbx->data = 0ull;   in cpt_mbox_send_ack()
     24  mbx->msg = CPT_MBOX_MSG_TYPE_ACK;   in cpt_mbox_send_ack()
     25  cpt_send_msg_to_vf(cpt, vf, mbx);   in cpt_mbox_send_ack()
     89  struct cpt_mbox mbx = {};   in cpt_handle_mbox_intr() local
     96  mbx.msg = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0));   in cpt_handle_mbox_intr()
     97  mbx.data = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1));   in cpt_handle_mbox_intr()
     [all …]
/linux-5.19.10/drivers/crypto/marvell/octeontx/

D | otx_cptvf_mbox.c
     76   struct otx_cpt_mbox *mbx)   in cptvf_send_msg_to_pf() argument
     79   writeq(mbx->msg, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));   in cptvf_send_msg_to_pf()
     80   writeq(mbx->data, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));   in cptvf_send_msg_to_pf()
     86   struct otx_cpt_mbox mbx = {};   in otx_cptvf_handle_mbox_intr() local
     92   mbx.msg = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));   in otx_cptvf_handle_mbox_intr()
     93   mbx.data = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));   in otx_cptvf_handle_mbox_intr()
     95   dump_mbox_msg(&mbx, -1);   in otx_cptvf_handle_mbox_intr()
     97   switch (mbx.msg) {   in otx_cptvf_handle_mbox_intr()
     100  cptvf->num_vfs = mbx.data;   in otx_cptvf_handle_mbox_intr()
     104  cptvf->vfid = mbx.data;   in otx_cptvf_handle_mbox_intr()
     [all …]
D | otx_cptpf_mbox.c
     75  struct otx_cpt_mbox *mbx)   in otx_cpt_send_msg_to_vf() argument
     78  writeq(mbx->data, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));   in otx_cpt_send_msg_to_vf()
     79  writeq(mbx->msg, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));   in otx_cpt_send_msg_to_vf()
     87  struct otx_cpt_mbox *mbx)   in otx_cpt_mbox_send_ack() argument
     89  mbx->data = 0ull;   in otx_cpt_mbox_send_ack()
     90  mbx->msg = OTX_CPT_MSG_ACK;   in otx_cpt_mbox_send_ack()
     91  otx_cpt_send_msg_to_vf(cpt, vf, mbx);   in otx_cpt_mbox_send_ack()
     96  struct otx_cpt_mbox *mbx)   in otx_cptpf_mbox_send_nack() argument
     98  mbx->data = 0ull;   in otx_cptpf_mbox_send_nack()
     99  mbx->msg = OTX_CPT_MSG_NACK;   in otx_cptpf_mbox_send_nack()
     [all …]
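All four CPT listings above use the same hardware channel: a pair of
64-bit CSRs per direction, one carrying a message code and one a data
word. The sender fills both registers, the receiver's interrupt
handler reads them back and switches on the code, and ACK/NACK replies
are just messages with the data word cleared. A sketch of that shape;
the register offsets are hypothetical stand-ins for the CPTX_*_MBOXX()
macros:

#include <linux/io.h>

#define MBOX_MSG_REG   0x0             /* hypothetical offsets */
#define MBOX_DATA_REG  0x8
#define MBOX_MSG_ACK   0x1             /* stand-in for CPT_MBOX_MSG_TYPE_ACK */

struct mbox_msg {
    u64 msg;
    u64 data;
};

static void mbox_send(void __iomem *base, const struct mbox_msg *m)
{
    /* the OcteonTX PF helper above writes DATA before MSG, plausibly
     * so the payload is in place before MSG acts as the doorbell */
    writeq(m->data, base + MBOX_DATA_REG);
    writeq(m->msg, base + MBOX_MSG_REG);
}

static void mbox_send_ack(void __iomem *base)
{
    struct mbox_msg m = { .msg = MBOX_MSG_ACK, .data = 0 };

    mbox_send(base, &m);
}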
/linux-5.19.10/drivers/net/ethernet/intel/igbvf/

D | mbx.c
     14  struct e1000_mbx_info *mbx = &hw->mbx;   in e1000_poll_for_msg() local
     15  int countdown = mbx->timeout;   in e1000_poll_for_msg()
     17  if (!mbx->ops.check_for_msg)   in e1000_poll_for_msg()
     20  while (countdown && mbx->ops.check_for_msg(hw)) {   in e1000_poll_for_msg()
     22  udelay(mbx->usec_delay);   in e1000_poll_for_msg()
     27  mbx->timeout = 0;   in e1000_poll_for_msg()
     40  struct e1000_mbx_info *mbx = &hw->mbx;   in e1000_poll_for_ack() local
     41  int countdown = mbx->timeout;   in e1000_poll_for_ack()
     43  if (!mbx->ops.check_for_ack)   in e1000_poll_for_ack()
     46  while (countdown && mbx->ops.check_for_ack(hw)) {   in e1000_poll_for_ack()
     [all …]
D | vf.c
     62   hw->mbx.ops.init_params = e1000_init_mbx_params_vf;   in e1000_init_function_pointers_vf()
     104  struct e1000_mbx_info *mbx = &hw->mbx;   in e1000_reset_hw_vf() local
     116  while (!mbx->ops.check_for_rst(hw) && timeout) {   in e1000_reset_hw_vf()
     123  mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;   in e1000_reset_hw_vf()
     127  mbx->ops.write_posted(hw, msgbuf, 1);   in e1000_reset_hw_vf()
     132  ret_val = mbx->ops.read_posted(hw, msgbuf, 3);   in e1000_reset_hw_vf()
     205  struct e1000_mbx_info *mbx = &hw->mbx;   in e1000_update_mc_addr_list_vf() local
     231  ret_val = mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE);   in e1000_update_mc_addr_list_vf()
     233  mbx->ops.read_posted(hw, msgbuf, 1);   in e1000_update_mc_addr_list_vf()
     244  struct e1000_mbx_info *mbx = &hw->mbx;   in e1000_set_vfta_vf() local
     [all …]
/linux-5.19.10/drivers/net/ethernet/cavium/thunder/

D | nic_main.c
     133  static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)   in nic_send_msg_to_vf() argument
     136  u64 *msg = (u64 *)mbx;   in nic_send_msg_to_vf()
     160  union nic_mbx mbx = {};   in nic_mbx_send_ready() local
     164  mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;   in nic_mbx_send_ready()
     165  mbx.nic_cfg.vf_id = vf;   in nic_mbx_send_ready()
     167  mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;   in nic_mbx_send_ready()
     175  ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);   in nic_mbx_send_ready()
     177  mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;   in nic_mbx_send_ready()
     178  mbx.nic_cfg.node_id = nic->node;   in nic_mbx_send_ready()
     180  mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;   in nic_mbx_send_ready()
     [all …]
D | nicvf_main.c
     119  static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)   in nicvf_write_to_mbx() argument
     121  u64 *msg = (u64 *)mbx;   in nicvf_write_to_mbx()
     127  int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)   in nicvf_send_msg_to_pf() argument
     137  nicvf_write_to_mbx(nic, mbx);   in nicvf_send_msg_to_pf()
     145  (mbx->msg.msg & 0xFF), nic->vf_id);   in nicvf_send_msg_to_pf()
     155  (mbx->msg.msg & 0xFF), nic->vf_id);   in nicvf_send_msg_to_pf()
     169  union nic_mbx mbx = {};   in nicvf_check_pf_ready() local
     171  mbx.msg.msg = NIC_MBOX_MSG_READY;   in nicvf_check_pf_ready()
     172  if (nicvf_send_msg_to_pf(nic, &mbx)) {   in nicvf_check_pf_ready()
     183  union nic_mbx mbx = {};   in nicvf_send_cfg_done() local
     [all …]
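The ThunderX code above builds every PF-to-VF message inside one union
(nic_mbx) whose members all share the mailbox window, so
nic_send_msg_to_vf() and nicvf_write_to_mbx() can cast the union to
u64 * and copy it out word by word. A sketch of the idea; the two-word
window size and the member layout are assumptions for illustration,
not the driver's actual definitions:

#include <linux/io.h>

#define MBOX_WINDOW_U64 2              /* assumed window size */

struct ready_msg {
    u8 msg;                            /* message id is the first byte */
    u8 vf_id;
};

union mbox_any {
    struct { u8 msg; } hdr;            /* every member starts with the id */
    struct ready_msg nic_cfg;
    u64 raw[MBOX_WINDOW_U64];
};

static void mbox_write(void __iomem *win, const union mbox_any *m)
{
    int i;

    /* copy the union into the device window one u64 at a time */
    for (i = 0; i < MBOX_WINDOW_U64; i++)
        writeq(m->raw[i], win + i * sizeof(u64));
}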
D | nicvf_queues.c
     642  union nic_mbx mbx = {};   in nicvf_reclaim_rcv_queue() local
     645  mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;   in nicvf_reclaim_rcv_queue()
     646  nicvf_send_msg_to_pf(nic, &mbx);   in nicvf_reclaim_rcv_queue()
     733  union nic_mbx mbx = {};   in nicvf_reset_rcv_queue_stats() local
     736  mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;   in nicvf_reset_rcv_queue_stats()
     737  mbx.reset_stat.rx_stat_mask = 0x3FFF;   in nicvf_reset_rcv_queue_stats()
     738  mbx.reset_stat.tx_stat_mask = 0x1F;   in nicvf_reset_rcv_queue_stats()
     739  mbx.reset_stat.rq_stat_mask = 0xFFFF;   in nicvf_reset_rcv_queue_stats()
     740  mbx.reset_stat.sq_stat_mask = 0xFFFF;   in nicvf_reset_rcv_queue_stats()
     741  nicvf_send_msg_to_pf(nic, &mbx);   in nicvf_reset_rcv_queue_stats()
     [all …]
/linux-5.19.10/drivers/net/ethernet/broadcom/bnx2x/

D | bnx2x_vfpf.c
     1125  struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);   in bnx2x_vf_mbx_resp_single_tlv() local
     1129  type = mbx->first_tlv.tl.type;   in bnx2x_vf_mbx_resp_single_tlv()
     1133  bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);   in bnx2x_vf_mbx_resp_single_tlv()
     1134  bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,   in bnx2x_vf_mbx_resp_single_tlv()
     1142  struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);   in bnx2x_vf_mbx_resp_send_msg() local
     1143  struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;   in bnx2x_vf_mbx_resp_send_msg()
     1150  mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);   in bnx2x_vf_mbx_resp_send_msg()
     1155  vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +   in bnx2x_vf_mbx_resp_send_msg()
     1156  mbx->first_tlv.resp_msg_offset;   in bnx2x_vf_mbx_resp_send_msg()
     1157  pf_addr = mbx->msg_mapping +   in bnx2x_vf_mbx_resp_send_msg()
     [all …]
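bnx2x_vf_mbx_resp_send_msg() above rebuilds the VF's reply buffer
address from two 32-bit halves plus an offset carried in the request's
first TLV. The address math is just this, with HILO_U64 composing the
halves (field names here are illustrative):

#include <linux/types.h>

static u64 vf_reply_addr(u32 addr_hi, u32 addr_lo, u16 resp_offset)
{
    /* equivalent of HILO_U64(addr_hi, addr_lo) + resp_offset */
    return (((u64)addr_hi << 32) | addr_lo) + resp_offset;
}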
/linux-5.19.10/drivers/net/ethernet/qlogic/qlcnic/

D | qlcnic_83xx_hw.c
     494  static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)   in qlcnic_83xx_notify_mbx_response() argument
     496  mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;   in qlcnic_83xx_notify_mbx_response()
     497  complete(&mbx->completion);   in qlcnic_83xx_notify_mbx_response()
     503  struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;   in qlcnic_83xx_poll_process_aen() local
     506  spin_lock_irqsave(&mbx->aen_lock, flags);   in qlcnic_83xx_poll_process_aen()
     515  if (mbx->rsp_status != rsp_status)   in qlcnic_83xx_poll_process_aen()
     516  qlcnic_83xx_notify_mbx_response(mbx);   in qlcnic_83xx_poll_process_aen()
     520  spin_unlock_irqrestore(&mbx->aen_lock, flags);   in qlcnic_83xx_poll_process_aen()
     851  struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;   in qlcnic_83xx_issue_cmd() local
     856  if (!mbx)   in qlcnic_83xx_issue_cmd()
     [all …]
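qlcnic_83xx_notify_mbx_response() above is the producer half of a
completion handshake: the AEN/interrupt path marks the response as
arrived and completes a struct completion that the command issuer
sleeps on. A minimal sketch of that hand-off, with simplified names
in place of the driver's:

#include <linux/completion.h>

#define MBX_RESPONSE_ARRIVED 1         /* stand-in for the QLC_83XX_* value */

struct mailbox {
    int rsp_status;
    struct completion done;
};

static void mailbox_notify_response(struct mailbox *mbx)
{
    mbx->rsp_status = MBX_RESPONSE_ARRIVED;
    complete(&mbx->done);              /* wake the waiter in the issue path */
}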
D | qlcnic_sriov_common.c
     313  struct qlcnic_mailbox *mbx = ahw->mailbox;   in qlcnic_sriov_post_bc_msg() local
     326  err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);   in qlcnic_sriov_post_bc_msg()
     340  flush_workqueue(mbx->work_q);   in qlcnic_sriov_post_bc_msg()
     716  static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)   in qlcnic_sriov_alloc_bc_mbx_args() argument
     726  mbx->op_type = QLC_BC_CMD;   in qlcnic_sriov_alloc_bc_mbx_args()
     727  mbx->req.num = mbx_tbl[i].in_args;   in qlcnic_sriov_alloc_bc_mbx_args()
     728  mbx->rsp.num = mbx_tbl[i].out_args;   in qlcnic_sriov_alloc_bc_mbx_args()
     729  mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),   in qlcnic_sriov_alloc_bc_mbx_args()
     731  if (!mbx->req.arg)   in qlcnic_sriov_alloc_bc_mbx_args()
     733  mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),   in qlcnic_sriov_alloc_bc_mbx_args()
     [all …]
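qlcnic_sriov_alloc_bc_mbx_args() above sizes a command's request and
response argument arrays from a lookup table and allocates both with
kcalloc. A sketch of the allocation step with the table lookup already
resolved into in_args/out_args; the unwind of the first allocation is
an assumption about what the full function's error path does:

#include <linux/errno.h>
#include <linux/slab.h>

struct cmd_args {
    u32 *req_arg, *rsp_arg;
    u32 req_num, rsp_num;
};

static int alloc_cmd_args(struct cmd_args *mbx, u32 in_args, u32 out_args)
{
    mbx->req_num = in_args;
    mbx->rsp_num = out_args;

    mbx->req_arg = kcalloc(mbx->req_num, sizeof(u32), GFP_KERNEL);
    if (!mbx->req_arg)
        return -ENOMEM;

    mbx->rsp_arg = kcalloc(mbx->rsp_num, sizeof(u32), GFP_KERNEL);
    if (!mbx->rsp_arg) {
        kfree(mbx->req_arg);           /* unwind the first allocation */
        return -ENOMEM;
    }

    return 0;
}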
/linux-5.19.10/drivers/net/ethernet/qlogic/qed/

D | qed_sriov.c
     1200  struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;   in qed_iov_send_response() local
     1204  mbx->reply_virt->default_resp.hdr.status = status;   in qed_iov_send_response()
     1206  qed_dp_tlv_list(p_hwfn, mbx->reply_virt);   in qed_iov_send_response()
     1214  qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),   in qed_iov_send_response()
     1215  mbx->req_virt->first_tlv.reply_address +   in qed_iov_send_response()
     1228  qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,   in qed_iov_send_response()
     1229  mbx->req_virt->first_tlv.reply_address,   in qed_iov_send_response()
     1307  struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;   in qed_iov_prepare_resp() local
     1309  mbx->offset = (u8 *)mbx->reply_virt;   in qed_iov_prepare_resp()
     1311  qed_add_tlv(p_hwfn, &mbx->offset, type, length);   in qed_iov_prepare_resp()
     [all …]
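qed_iov_prepare_resp() above resets mbx->offset to the start of the
reply buffer and appends type/length headers with qed_add_tlv(); the
finished chain is then DMA-copied back to the VF's reply address in
qed_iov_send_response(). A sketch of the append step, with an
illustrative TLV header layout rather than qed's real one:

#include <linux/types.h>

struct channel_tlv {
    u16 type;
    u16 length;                        /* covers header plus payload */
};

/* write one TLV header at the cursor and advance it past the TLV */
static void add_tlv(u8 **offset, u16 type, u16 length)
{
    struct channel_tlv *tlv = (struct channel_tlv *)*offset;

    tlv->type = type;
    tlv->length = length;
    *offset += length;
}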
/linux-5.19.10/Documentation/devicetree/bindings/net/can/

D | ti_hecc.txt
     10  and 'mbx'
     11  - reg-names :"hecc", "hecc-ram", "mbx"
     29  reg-names = "hecc", "hecc-ram", "mbx";