/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/en/ |
D | htb.c |
    17  u16 qid;  member
    40  if (node->qid == MLX5E_QOS_QID_INNER)  in mlx5e_htb_enumerate_leaves()
    42  err = callback(data, node->qid, node->hw_id);  in mlx5e_htb_enumerate_leaves()
    70  mlx5e_htb_node_create_leaf(struct mlx5e_htb *htb, u16 classid, u16 qid,  in mlx5e_htb_node_create_leaf() argument
    81  node->qid = qid;  in mlx5e_htb_node_create_leaf()
    82  __set_bit(qid, htb->qos_used_qids);  in mlx5e_htb_node_create_leaf()
   100  node->qid = MLX5E_QOS_QID_INNER;  in mlx5e_htb_node_create_root()
   134  if (node->qid != MLX5E_QOS_QID_INNER) {  in mlx5e_htb_node_delete()
   135  __clear_bit(node->qid, htb->qos_used_qids);  in mlx5e_htb_node_delete()
   150  u16 qid;  in mlx5e_htb_get_txq_by_classid() local
   [all …]
|
D | qos.c |
    38  u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)  in mlx5e_qid_from_qos() argument
    49  return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;  in mlx5e_qid_from_qos()
    54  static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)  in mlx5e_get_qos_sq() argument
    61  ix = qid % params->num_channels;  in mlx5e_get_qos_sq()
    62  qid /= params->num_channels;  in mlx5e_get_qos_sq()
    66  return mlx5e_state_dereference(priv, qos_sqs[qid]);  in mlx5e_get_qos_sq()
    76  int txq_ix, ix, qid, err = 0;  in mlx5e_open_qos_sq() local
   111  qid = node_qid / params->num_channels;  in mlx5e_open_qos_sq()
   135  rcu_assign_pointer(qos_sqs[qid], sq);  in mlx5e_open_qos_sq()
   157  u16 qid;  in mlx5e_activate_qos_sq() local
   [all …]
|
D | qos.h |
    24  void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid);
    25  void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid);
    26  void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
    27  void mlx5e_reset_qdisc(struct net_device *dev, u16 qid);
    38  u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid);
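The qos.c hits above encode the QoS qid mapping in both directions: mlx5e_qid_from_qos() appends QoS queues after the regular txqs at (num_channels + is_ptp) * num_tc + qid, while mlx5e_get_qos_sq() splits a qid into a channel index (qid % num_channels) and a per-channel group (qid / num_channels). A minimal standalone sketch of that arithmetic, with simplified names in place of the driver's structs:

```c
#include <assert.h>

/* Hypothetical, simplified view of the mlx5e QoS qid layout: regular
 * txqs come first, one per (channel, tc) pair (plus an optional PTP
 * channel), and QoS SQs are appended after them.
 */
static unsigned int qid_to_txq(unsigned int num_channels, int is_ptp,
			       unsigned int num_tc, unsigned int qid)
{
	return (num_channels + is_ptp) * num_tc + qid;
}

/* A qid is striped across channels: channel index first, then group. */
static void qid_to_channel(unsigned int num_channels, unsigned int qid,
			   unsigned int *ix, unsigned int *group)
{
	*ix = qid % num_channels;	/* which channel owns the SQ */
	*group = qid / num_channels;	/* index into that channel's qos_sqs[] */
}

int main(void)
{
	unsigned int ix, group;

	/* 4 channels, no PTP, 2 TCs: 8 regular txqs, so QoS qid 5 -> txq 13 */
	assert(qid_to_txq(4, 0, 2, 5) == 13);
	qid_to_channel(4, 5, &ix, &group);
	assert(ix == 1 && group == 1);
	return 0;
}
```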
|
/linux-6.6.21/drivers/vdpa/pds/ |
D | vdpa_dev.c |
    72  static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,  in pds_vdpa_set_vq_address() argument
    77  pdsv->vqs[qid].desc_addr = desc_addr;  in pds_vdpa_set_vq_address()
    78  pdsv->vqs[qid].avail_addr = driver_addr;  in pds_vdpa_set_vq_address()
    79  pdsv->vqs[qid].used_addr = device_addr;  in pds_vdpa_set_vq_address()
    84  static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)  in pds_vdpa_set_vq_num() argument
    88  pdsv->vqs[qid].q_len = num;  in pds_vdpa_set_vq_num()
    91  static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)  in pds_vdpa_kick_vq() argument
    95  iowrite16(qid, pdsv->vqs[qid].notify);  in pds_vdpa_kick_vq()
    98  static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,  in pds_vdpa_set_vq_cb() argument
   103  pdsv->vqs[qid].event_cb = *cb;  in pds_vdpa_set_vq_cb()
   [all …]
|
D | cmds.c |
   125  int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,  in pds_vdpa_cmd_init_vq() argument
   134  .vdpa_vq_init.qid = cpu_to_le16(qid),  in pds_vdpa_cmd_init_vq()
   139  .vdpa_vq_init.intr_index = cpu_to_le16(qid),  in pds_vdpa_cmd_init_vq()
   147  __func__, qid, ilog2(vq_info->q_len),  in pds_vdpa_cmd_init_vq()
   154  qid, comp.status, ERR_PTR(err));  in pds_vdpa_cmd_init_vq()
   159  int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,  in pds_vdpa_cmd_reset_vq() argument
   168  .vdpa_vq_reset.qid = cpu_to_le16(qid),  in pds_vdpa_cmd_reset_vq()
   177  qid, comp.status, ERR_PTR(err));  in pds_vdpa_cmd_reset_vq()
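pds_vdpa_cmd_init_vq() stores the qid into the admin command with cpu_to_le16(), meaning the device consumes little-endian fields regardless of host byte order (and, per line 139, the qid doubles as the interrupt index). A hedged userspace sketch of that byte-order discipline; the two-field command struct is illustrative, not the real pds layout:

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical miniature of a vq_init-style command: the device expects
 * little-endian fields, so values are stored low byte first on any host
 * (the kernel's cpu_to_le16() does this at no cost on little-endian).
 */
struct vq_init_cmd {
	uint16_t qid;		/* little-endian on the wire */
	uint16_t intr_index;	/* little-endian on the wire */
};

static uint16_t cpu_to_le16_portable(uint16_t v)
{
	uint8_t b[2] = { v & 0xff, v >> 8 };	/* low byte first, always */
	uint16_t out;

	memcpy(&out, b, 2);
	return out;
}

int main(void)
{
	struct vq_init_cmd cmd = {
		.qid = cpu_to_le16_portable(3),
		/* the snippet above reuses the qid as the interrupt index */
		.intr_index = cpu_to_le16_portable(3),
	};

	assert(((uint8_t *)&cmd)[0] == 3 && ((uint8_t *)&cmd)[1] == 0);
	return 0;
}
```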
|
/linux-6.6.21/arch/s390/include/asm/ |
D | ap.h |
   133  static inline struct ap_queue_status ap_tapq(ap_qid_t qid, struct ap_tapq_gr2 *info)  in ap_tapq() argument
   145  : [qid] "d" (qid)  in ap_tapq()
   160  static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, int tbit,  in ap_test_queue() argument
   164  qid |= 1UL << 23; /* set T bit*/  in ap_test_queue()
   165  return ap_tapq(qid, info);  in ap_test_queue()
   175  static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)  in ap_rapq() argument
   177  unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */  in ap_rapq()
   200  static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)  in ap_zapq() argument
   202  unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */  in ap_zapq()
   295  static inline struct ap_queue_status ap_aqic(ap_qid_t qid,  in ap_aqic() argument
   [all …]
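These ap.h inlines build the PQAP register 0 from the qid: bit 23 is the TAPQ T bit and the function code sits at bit 24 (1 = RAPQ, 2 = ZAPQ). A standalone sketch of that encoding; the AP_MKQID card/queue packing is assumed from the kernel's usual definition, which is not itself among the hits above:

```c
#include <assert.h>

typedef unsigned long ap_qid_t;

/* Assumed card/queue packing, mirroring the kernel's AP_MKQID: card in
 * bits 8..13, queue number in bits 0..7. The T bit and function-code
 * positions come straight from the snippets above.
 */
#define AP_MKQID(card, queue)	((((card) & 0x3fUL) << 8) | ((queue) & 0xffUL))
#define AP_QID_CARD(qid)	(((qid) >> 8) & 0x3f)
#define AP_QID_QUEUE(qid)	((qid) & 0xff)

static ap_qid_t tapq_reg0(ap_qid_t qid, int tbit)
{
	if (tbit)
		qid |= 1UL << 23;	/* request extended TAPQ info */
	return qid;
}

static ap_qid_t rapq_reg0(ap_qid_t qid) { return qid | (1UL << 24); }
static ap_qid_t zapq_reg0(ap_qid_t qid) { return qid | (2UL << 24); }

int main(void)
{
	ap_qid_t qid = AP_MKQID(5, 17);

	assert(AP_QID_CARD(qid) == 5 && AP_QID_QUEUE(qid) == 17);
	assert(tapq_reg0(qid, 1) & (1UL << 23));
	assert((rapq_reg0(qid) >> 24) == 1 && (zapq_reg0(qid) >> 24) == 2);
	return 0;
}
```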
|
/linux-6.6.21/drivers/net/ethernet/marvell/prestera/ |
D | prestera_pci.c |
   242  static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid)  in prestera_fw_evtq_len() argument
   244  return fw->evt_queue[qid].len;  in prestera_fw_evtq_len()
   247  static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid)  in prestera_fw_evtq_avail() argument
   249  u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid));  in prestera_fw_evtq_avail()
   250  u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));  in prestera_fw_evtq_avail()
   252  return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid));  in prestera_fw_evtq_avail()
   256  u8 qid, u32 idx)  in prestera_fw_evtq_rd_set() argument
   258  u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1);  in prestera_fw_evtq_rd_set()
   260  prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx);  in prestera_fw_evtq_rd_set()
   263  static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid)  in prestera_fw_evtq_buf() argument
   [all …]
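prestera_fw_evtq_avail() is the standard power-of-two ring calculation: CIRC_CNT() (from linux/circ_buf.h) subtracts the read index from the write index and masks with size - 1, which stays correct across wrap-around, and prestera_fw_evtq_rd_set() applies the same mask when storing the read index. A compact demonstration:

```c
#include <assert.h>
#include <stdint.h>

/* CIRC_CNT as defined in linux/circ_buf.h: number of filled slots in a
 * power-of-two ring, robust to index wrap-around. Here it counts pending
 * events between the firmware write index and the driver read index.
 */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))

int main(void)
{
	uint32_t len = 8;			/* must be a power of two */

	assert(CIRC_CNT(5, 2, len) == 3);	/* plain case */
	assert(CIRC_CNT(1, 6, len) == 3);	/* wrapped write index */
	assert((9 & (len - 1)) == 1);		/* rd_set-style index masking */
	return 0;
}
```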
|
/linux-6.6.21/drivers/infiniband/hw/cxgb4/ |
D | resource.c |
   111  u32 qid;  in c4iw_get_cqid() local
   119  qid = entry->qid;  in c4iw_get_cqid()
   122  qid = c4iw_get_resource(&rdev->resource.qid_table);  in c4iw_get_cqid()
   123  if (!qid)  in c4iw_get_cqid()
   126  rdev->stats.qid.cur += rdev->qpmask + 1;  in c4iw_get_cqid()
   128  for (i = qid+1; i & rdev->qpmask; i++) {  in c4iw_get_cqid()
   132  entry->qid = i;  in c4iw_get_cqid()
   143  entry->qid = qid;  in c4iw_get_cqid()
   145  for (i = qid+1; i & rdev->qpmask; i++) {  in c4iw_get_cqid()
   149  entry->qid = i;  in c4iw_get_cqid()
   [all …]
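The loops at lines 128 and 145 suggest c4iw_get_cqid() obtains qids in aligned blocks of (qpmask + 1): one id satisfies the request and the rest of the block is banked for later callers. A toy sketch of that loop shape; the block size, names, and the banking interpretation are illustrative, inferred from the two visible loops:

```c
#include <stdio.h>

#define QPMASK 3	/* block size 4, assumed for the example */

int main(void)
{
	unsigned int qid = 8;	/* block-aligned id from the allocator */
	unsigned int i;

	printf("handing out qid %u, banking:", qid);
	/* same shape as lines 128/145: stop when i wraps past the block */
	for (i = qid + 1; i & QPMASK; i++)
		printf(" %u", i);	/* 9 10 11 would join a free list */
	printf("\n");
	return 0;
}
```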
|
/linux-6.6.21/drivers/nvme/host/ |
D | auth.c |
    27  int qid;  member
    50  #define nvme_auth_flags_from_qid(qid) \  argument
    51  (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
    52  #define nvme_auth_queue_from_qid(ctrl, qid) \  argument
    53  (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
    61  static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,  in nvme_auth_submit() argument
    65  blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);  in nvme_auth_submit()
    66  struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);  in nvme_auth_submit()
    82  qid == 0 ? NVME_QID_ANY : qid,  in nvme_auth_submit()
    86  "qid %d auth_send failed with status %d\n", qid, ret);  in nvme_auth_submit()
   [all …]
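The two macros at lines 50-53 dispatch on the qid: admin-queue authentication (qid 0) goes through the fabrics queue with no special flags, while I/O-queue authentication uses the connect queue with nowait/reserved tags. A sketch of the same dispatch as plain functions; the flag values and ctrl layout are stand-ins, not the kernel's:

```c
#include <assert.h>

#define BLK_MQ_REQ_NOWAIT	(1 << 0)	/* illustrative values */
#define BLK_MQ_REQ_RESERVED	(1 << 1)

struct ctrl { void *fabrics_q, *connect_q; };

/* admin queue (qid 0) may block; I/O queues use reserved tags */
static unsigned int auth_flags_from_qid(int qid)
{
	return qid == 0 ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED;
}

static void *auth_queue_from_qid(struct ctrl *ctrl, int qid)
{
	return qid == 0 ? ctrl->fabrics_q : ctrl->connect_q;
}

int main(void)
{
	struct ctrl c = { .fabrics_q = (void *)1, .connect_q = (void *)2 };

	assert(auth_flags_from_qid(0) == 0);
	assert(auth_queue_from_qid(&c, 3) == c.connect_q);
	return 0;
}
```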
|
D | trace.h |
    26  #define parse_nvme_cmd(qid, opcode, fctype, cdw10) \  argument
    29  ((qid) ? \
    53  __field(int, qid)
    64  __entry->qid = nvme_req_qid(req);
    77  __entry->qid, __entry->cid, __entry->nsid,
    79  show_opcode_name(__entry->qid, __entry->opcode,
    81  parse_nvme_cmd(__entry->qid, __entry->opcode,
    91  __field(int, qid)
   100  __entry->qid = nvme_req_qid(req);
   110  __entry->qid, __entry->cid, __entry->result,
   [all …]
|
/linux-6.6.21/drivers/vdpa/ifcvf/ |
D | ifcvf_base.c |
    13  u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)  in ifcvf_set_vq_vector() argument
    17  vp_iowrite16(qid, &cfg->queue_select);  in ifcvf_set_vq_vector()
    72  static u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)  in ifcvf_get_vq_size() argument
    76  vp_iowrite16(qid, &hw->common_cfg->queue_select);  in ifcvf_get_vq_size()
    88  u16 queue_size, max_size, qid;  in ifcvf_get_max_vq_size() local
    91  for (qid = 1; qid < hw->nr_vring; qid++) {  in ifcvf_get_max_vq_size()
    92  queue_size = ifcvf_get_vq_size(hw, qid);  in ifcvf_get_max_vq_size()
   329  u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)  in ifcvf_get_vq_state() argument
   334  last_avail_idx = vp_ioread16(&lm_cfg->vq_state_region + qid * 2);  in ifcvf_get_vq_state()
   339  int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)  in ifcvf_set_vq_state() argument
   [all …]
|
D | ifcvf_base.h |
   107  void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
   119  u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
   120  int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
   124  u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
   126  void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
   127  int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
   129  bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
   130  void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
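ifcvf_get_vq_size() shows the virtio-pci common-config indirection: software writes the qid into queue_select, after which the per-queue registers alias onto that queue; ifcvf_get_max_vq_size() then simply scans all qids. A userspace simulation of that register window (a four-queue device and the single queue_size register are assumptions of the sketch):

```c
#include <assert.h>
#include <stdint.h>

/* Simulated common config: queue_select picks which queue the per-queue
 * register (queue_size here) refers to, mirroring the MMIO layout the
 * ifcvf snippets program with vp_iowrite16().
 */
struct common_cfg_sim {
	uint16_t queue_select;
	uint16_t queue_size[4];	/* per-queue backing store */
};

static uint16_t get_vq_size(struct common_cfg_sim *cfg, uint16_t qid)
{
	cfg->queue_select = qid;	/* vp_iowrite16(qid, &cfg->queue_select) */
	return cfg->queue_size[cfg->queue_select];
}

int main(void)
{
	struct common_cfg_sim cfg = { .queue_size = { 256, 128, 64, 32 } };
	uint16_t qid, max = get_vq_size(&cfg, 0);

	/* same scan as ifcvf_get_max_vq_size(): walk qids, keep the max */
	for (qid = 1; qid < 4; qid++)
		if (get_vq_size(&cfg, qid) > max)
			max = get_vq_size(&cfg, qid);
	assert(max == 256);
	return 0;
}
```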
|
/linux-6.6.21/drivers/s390/crypto/ |
D | ap_queue.c |
    52  status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));  in ap_queue_enable_irq()
    64  AP_QID_CARD(aq->qid),  in ap_queue_enable_irq()
    65  AP_QID_QUEUE(aq->qid));  in ap_queue_enable_irq()
    88  __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,  in __ap_send() argument
    92  qid |= 0x400000UL;  in __ap_send()
    93  return ap_nqap(qid, psmid, msg, msglen);  in __ap_send()
   127  status = ap_dqap(aq->qid, &aq->reply->psmid,  in ap_sm_recv()
   158  AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));  in ap_sm_recv()
   210  AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));  in ap_sm_read()
   225  ap_qid_t qid = aq->qid;  in ap_sm_write() local
   [all …]
|
D | zcrypt_api.c |
   646  int cpen, qpen, qid = 0, rc = -ENODEV;  in zcrypt_rsa_modexpo() local
   700  AP_QID_QUEUE(zq->queue->qid)))  in zcrypt_rsa_modexpo()
   704  tr->last_qid == zq->queue->qid) ?  in zcrypt_rsa_modexpo()
   724  qid = pref_zq->queue->qid;  in zcrypt_rsa_modexpo()
   735  tr->last_qid = qid;  in zcrypt_rsa_modexpo()
   738  AP_QID_CARD(qid), AP_QID_QUEUE(qid));  in zcrypt_rsa_modexpo()
   751  int cpen, qpen, qid = 0, rc = -ENODEV;  in zcrypt_rsa_crt() local
   805  AP_QID_QUEUE(zq->queue->qid)))  in zcrypt_rsa_crt()
   809  tr->last_qid == zq->queue->qid) ?  in zcrypt_rsa_crt()
   829  qid = pref_zq->queue->qid;  in zcrypt_rsa_crt()
   [all …]
|
D | zcrypt_msgtype6.c |
   240  msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);  in icamex_msg_to_type6mex_msgx()
   310  msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);  in icacrt_msg_to_type6crt_msgx()
   587  __func__, AP_QID_CARD(zq->queue->qid),  in convert_type86_ica()
   588  AP_QID_QUEUE(zq->queue->qid),  in convert_type86_ica()
   594  AP_QID_CARD(zq->queue->qid),  in convert_type86_ica()
   595  AP_QID_QUEUE(zq->queue->qid),  in convert_type86_ica()
   598  __func__, AP_QID_CARD(zq->queue->qid),  in convert_type86_ica()
   599  AP_QID_QUEUE(zq->queue->qid),  in convert_type86_ica()
   739  AP_QID_CARD(zq->queue->qid),  in convert_response_ica()
   740  AP_QID_QUEUE(zq->queue->qid),  in convert_response_ica()
   [all …]
|
/linux-6.6.21/drivers/scsi/lpfc/ |
D | lpfc_debugfs.h |
   570  lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)  in lpfc_debug_dump_wq_by_id() argument
   575  if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)  in lpfc_debug_dump_wq_by_id()
   578  pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);  in lpfc_debug_dump_wq_by_id()
   583  if (phba->sli4_hba.els_wq->queue_id == qid) {  in lpfc_debug_dump_wq_by_id()
   584  pr_err("ELS WQ[Qid:%d]\n", qid);  in lpfc_debug_dump_wq_by_id()
   589  if (phba->sli4_hba.nvmels_wq->queue_id == qid) {  in lpfc_debug_dump_wq_by_id()
   590  pr_err("NVME LS WQ[Qid:%d]\n", qid);  in lpfc_debug_dump_wq_by_id()
   604  lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid)  in lpfc_debug_dump_mq_by_id() argument
   606  if (phba->sli4_hba.mbx_wq->queue_id == qid) {  in lpfc_debug_dump_mq_by_id()
   607  printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid);  in lpfc_debug_dump_mq_by_id()
   [all …]
|
/linux-6.6.21/drivers/nvme/target/ |
D | fabrics-cmd-auth.c |
    21  __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);  in nvmet_auth_expired_work()
    41  __func__, ctrl->cntlid, req->sq->qid,  in nvmet_auth_negotiate()
    70  __func__, ctrl->cntlid, req->sq->qid);  in nvmet_auth_negotiate()
    74  __func__, ctrl->cntlid, req->sq->qid,  in nvmet_auth_negotiate()
    98  __func__, ctrl->cntlid, req->sq->qid);  in nvmet_auth_negotiate()
   102  __func__, ctrl->cntlid, req->sq->qid,  in nvmet_auth_negotiate()
   107  __func__, ctrl->cntlid, req->sq->qid,  in nvmet_auth_negotiate()
   120  __func__, ctrl->cntlid, req->sq->qid,  in nvmet_auth_reply()
   137  ctrl->cntlid, req->sq->qid);  in nvmet_auth_reply()
   143  ctrl->cntlid, req->sq->qid);  in nvmet_auth_reply()
   [all …]
|
D | trace.h |
    28  #define parse_nvme_cmd(qid, opcode, fctype, cdw10) \  argument
    31  (qid ? \
    67  __field(int, qid)
    80  __entry->qid = req->sq->qid;
    94  __entry->qid, __entry->cid, __entry->nsid,
    96  show_opcode_name(__entry->qid, __entry->opcode,
    98  parse_nvme_cmd(__entry->qid, __entry->opcode,
   108  __field(int, qid)
   115  __entry->qid = req->cq->qid;
   124  __entry->qid, __entry->cid, __entry->result, __entry->status)
|
D | fabrics-cmd.c |
   140  u16 qid = le16_to_cpu(c->qid);  in nvmet_install_queue() local
   154  if (ctrl->sqs[qid] != NULL) {  in nvmet_install_queue()
   155  pr_warn("qid %u has already been created\n", qid);  in nvmet_install_queue()
   156  req->error_loc = offsetof(struct nvmf_connect_command, qid);  in nvmet_install_queue()
   176  nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);  in nvmet_install_queue()
   177  nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);  in nvmet_install_queue()
   188  qid, ctrl->cntlid, ret);  in nvmet_install_queue()
   189  ctrl->sqs[qid] = NULL;  in nvmet_install_queue()
   292  u16 qid = le16_to_cpu(c->qid);  in nvmet_execute_io_connect() local
   327  if (unlikely(qid > ctrl->subsys->max_qid)) {  in nvmet_execute_io_connect()
   [all …]
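nvmet_install_queue() validates the qid before wiring the queues: a slot already populated in ctrl->sqs[] is rejected, an I/O connect with qid > max_qid is rejected, and since NVMe's SQSIZE field is zero-based the queues are sized sqsize + 1. A hedged sketch of those checks; the constants, return codes, and ctrl layout are illustrative:

```c
#include <stdio.h>

#define MAX_QID 8	/* stand-in for ctrl->subsys->max_qid */

struct ctrl { void *sqs[MAX_QID + 1]; };

static int install_queue(struct ctrl *ctrl, unsigned int qid,
			 unsigned int sqsize)
{
	if (qid > MAX_QID)
		return -1;		/* invalid-parameter status in-kernel */
	if (ctrl->sqs[qid] != NULL) {
		fprintf(stderr, "qid %u has already been created\n", qid);
		return -1;
	}
	/* SQSIZE is zero-based, hence the "+ 1" when sizing the queues */
	printf("qid %u: cq/sq depth %u\n", qid, sqsize + 1);
	ctrl->sqs[qid] = &ctrl->sqs[qid];	/* mark the slot in use */
	return 0;
}

int main(void)
{
	struct ctrl c = { { 0 } };

	install_queue(&c, 1, 127);	/* depth 128 */
	return install_queue(&c, 1, 127) == -1 ? 0 : 1;	/* duplicate fails */
}
```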
|
/linux-6.6.21/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_process_queue_manager.c |
    33  struct process_queue_manager *pqm, unsigned int qid)  in get_queue_by_qid() argument
    38  if ((pqn->q && pqn->q->properties.queue_id == qid) ||  in get_queue_by_qid()
    39  (pqn->kq && pqn->kq->queue->properties.queue_id == qid))  in get_queue_by_qid()
    47  unsigned int qid)  in assign_queue_slot_by_qid() argument
    49  if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)  in assign_queue_slot_by_qid()
    52  if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {  in assign_queue_slot_by_qid()
    53  pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);  in assign_queue_slot_by_qid()
    61  unsigned int *qid)  in find_available_queue_slot() argument
    77  *qid = found;  in find_available_queue_slot()
    95  int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,  in pqm_set_gws() argument
   [all …]
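kfd's queue-slot logic is a bitmap allocator: assign_queue_slot_by_qid() claims a caller-chosen qid with __test_and_set_bit(), while find_available_queue_slot() takes the first clear bit. A userspace sketch using a plain word in place of the kernel bitmap helpers (so at most 32 slots here, unlike KFD_MAX_NUM_OF_QUEUES_PER_PROCESS):

```c
#include <assert.h>

#define MAX_QUEUES 32	/* limited by the single-word bitmap below */

/* claim a caller-requested qid; fails if out of range or already set */
static int assign_slot(unsigned long *bitmap, unsigned int qid)
{
	if (qid >= MAX_QUEUES)
		return -1;
	if (*bitmap & (1UL << qid))
		return -1;	/* "requested qid(%u) is in use" */
	*bitmap |= 1UL << qid;
	return 0;
}

/* auto-allocate: first clear bit wins, like find_available_queue_slot() */
static int find_slot(unsigned long *bitmap, unsigned int *qid)
{
	unsigned int i;

	for (i = 0; i < MAX_QUEUES; i++) {
		if (!(*bitmap & (1UL << i))) {
			*bitmap |= 1UL << i;
			*qid = i;
			return 0;
		}
	}
	return -1;		/* all slots taken */
}

int main(void)
{
	unsigned long bitmap = 0;
	unsigned int qid;

	assert(assign_slot(&bitmap, 3) == 0);
	assert(assign_slot(&bitmap, 3) == -1);	/* duplicate rejected */
	assert(find_slot(&bitmap, &qid) == 0 && qid == 0);
	return 0;
}
```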
|
/linux-6.6.21/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_xsk.c |
    15  int qid = ring->ring_idx;  in ixgbe_xsk_pool() local
    17  if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))  in ixgbe_xsk_pool()
    20  return xsk_get_pool_from_qid(adapter->netdev, qid);  in ixgbe_xsk_pool()
    25  u16 qid)  in ixgbe_xsk_pool_enable() argument
    31  if (qid >= adapter->num_rx_queues)  in ixgbe_xsk_pool_enable()
    34  if (qid >= netdev->real_num_rx_queues ||  in ixgbe_xsk_pool_enable()
    35  qid >= netdev->real_num_tx_queues)  in ixgbe_xsk_pool_enable()
    46  ixgbe_txrx_ring_disable(adapter, qid);  in ixgbe_xsk_pool_enable()
    48  set_bit(qid, adapter->af_xdp_zc_qps);  in ixgbe_xsk_pool_enable()
    51  ixgbe_txrx_ring_enable(adapter, qid);  in ixgbe_xsk_pool_enable()
   [all …]
|
/linux-6.6.21/include/linux/ |
D | quota.h |
    79  extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
    80  extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
    81  extern bool qid_valid(struct kqid qid);
    98  enum quota_type type, qid_t qid)  in make_kqid() argument
   105  kqid.uid = make_kuid(from, qid);  in make_kqid()
   108  kqid.gid = make_kgid(from, qid);  in make_kqid()
   111  kqid.projid = make_kprojid(from, qid);  in make_kqid()
   187  static inline bool qid_has_mapping(struct user_namespace *ns, struct kqid qid)  in qid_has_mapping() argument
   189  return from_kqid(ns, qid) != (qid_t) -1;  in qid_has_mapping()
   320  …int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structu…
   [all …]
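from_kqid() maps a kernel-internal kqid back to a userspace-visible id and returns (qid_t)-1 when the namespace has no mapping; qid_has_mapping() at line 189 is exactly that comparison. A toy model of the pattern, with the namespace lookup stubbed as a range check (the real lookup walks uid/gid/projid maps):

```c
#include <assert.h>

typedef unsigned int qid_t;
struct kqid { unsigned int id; };
struct user_namespace { unsigned int first, count; };	/* toy mapping */

static qid_t from_kqid(struct user_namespace *ns, struct kqid qid)
{
	if (qid.id < ns->first || qid.id >= ns->first + ns->count)
		return (qid_t)-1;	/* no mapping in this namespace */
	return qid.id - ns->first;
}

/* mirrors line 189: "has a mapping" just means "is not (qid_t)-1" */
static int qid_has_mapping(struct user_namespace *ns, struct kqid qid)
{
	return from_kqid(ns, qid) != (qid_t)-1;
}

int main(void)
{
	struct user_namespace ns = { .first = 100000, .count = 65536 };

	assert(qid_has_mapping(&ns, (struct kqid){ .id = 100001 }));
	assert(!qid_has_mapping(&ns, (struct kqid){ .id = 5 }));
	return 0;
}
```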
|
/linux-6.6.21/drivers/net/ethernet/marvell/octeontx2/nic/ |
D | qos.c |
    98  if (node->qid == OTX2_QOS_QID_NONE) {  in otx2_config_sched_shaping()
   177  if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {  in __otx2_qos_txschq_cfg()
   275  if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {  in otx2_qos_sw_node_delete()
   276  __clear_bit(node->qid, pfvf->qos.qos_sq_bmap);  in otx2_qos_sw_node_delete()
   417  WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);  in otx2_qos_alloc_root()
   468  WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE);  in otx2_qos_alloc_txschq_node()
   501  u32 quantum, u16 qid, bool static_cfg)  in otx2_qos_sw_create_leaf_node() argument
   513  WRITE_ONCE(node->qid, qid);  in otx2_qos_sw_create_leaf_node()
   523  __set_bit(qid, pfvf->qos.qos_sq_bmap);  in otx2_qos_sw_create_leaf_node()
   576  u16 qid;  in otx2_get_txq_by_classid() local
   [all …]
|
/linux-6.6.21/fs/xfs/ |
D | xfs_quotaops.c |
   216  struct kqid qid,  in xfs_fs_get_dqblk() argument
   225  id = from_kqid(&init_user_ns, qid);  in xfs_fs_get_dqblk()
   226  return xfs_qm_scall_getquota(mp, id, xfs_quota_type(qid.type), qdq);  in xfs_fs_get_dqblk()
   233  struct kqid *qid,  in xfs_fs_get_nextdqblk() argument
   243  id = from_kqid(&init_user_ns, *qid);  in xfs_fs_get_nextdqblk()
   244  ret = xfs_qm_scall_getquota_next(mp, &id, xfs_quota_type(qid->type),  in xfs_fs_get_nextdqblk()
   250  *qid = make_kqid(current_user_ns(), qid->type, id);  in xfs_fs_get_nextdqblk()
   257  struct kqid qid,  in xfs_fs_set_dqblk() argument
   267  return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),  in xfs_fs_set_dqblk()
   268  xfs_quota_type(qid.type), qdq);  in xfs_fs_set_dqblk()
|
/linux-6.6.21/drivers/vdpa/alibaba/ |
D | eni_vdpa.c |
   257  static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,  in eni_vdpa_get_vq_state() argument
   263  static int eni_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,  in eni_vdpa_set_vq_state() argument
   273  if (!vp_legacy_get_queue_enable(ldev, qid)  in eni_vdpa_set_vq_state()
   281  static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,  in eni_vdpa_set_vq_cb() argument
   286  eni_vdpa->vring[qid].cb = *cb;  in eni_vdpa_set_vq_cb()
   289  static void eni_vdpa_set_vq_ready(struct vdpa_device *vdpa, u16 qid,  in eni_vdpa_set_vq_ready() argument
   299  vp_legacy_set_queue_address(ldev, qid, 0);  in eni_vdpa_set_vq_ready()
   302  static bool eni_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)  in eni_vdpa_get_vq_ready() argument
   306  return vp_legacy_get_queue_enable(ldev, qid);  in eni_vdpa_get_vq_ready()
   309  static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,  in eni_vdpa_set_vq_num() argument
   [all …]
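eni_vdpa_set_vq_ready() reflects a legacy virtio-pci quirk: there is no per-queue enable register, so, on the assumption suggested by the vp_legacy_set_queue_address(ldev, qid, 0) call above, a queue is "ready" exactly when its page address is non-zero, and writing address 0 is how it gets disabled. A small simulation of that convention (the struct and names are illustrative):

```c
#include <assert.h>
#include <stdint.h>

/* Simulated legacy device: one page-frame register per queue; readiness
 * is synthesized from "address != 0" since no enable bit exists.
 */
struct legacy_dev_sim { uint32_t queue_pfn[4]; };

static void set_queue_address(struct legacy_dev_sim *d, uint16_t qid,
			      uint32_t pfn)
{
	d->queue_pfn[qid] = pfn;
}

static int get_queue_enable(struct legacy_dev_sim *d, uint16_t qid)
{
	return d->queue_pfn[qid] != 0;
}

int main(void)
{
	struct legacy_dev_sim d = { { 0 } };

	set_queue_address(&d, 2, 0x1234);
	assert(get_queue_enable(&d, 2));
	set_queue_address(&d, 2, 0);	/* the only way to "unready" */
	assert(!get_queue_enable(&d, 2));
	return 0;
}
```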
|