Lines Matching refs:cvq

1617 	struct mlx5_control_vq *cvq = &mvdev->cvq;  in handle_ctrl_mac()  local
1626 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN); in handle_ctrl_mac()
1745 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mq() local
1764 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq)); in handle_ctrl_mq()
1793 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_vlan() local
1800 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan)); in handle_ctrl_vlan()
1811 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan)); in handle_ctrl_vlan()
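
The three handlers above (handle_ctrl_mac(), handle_ctrl_mq(), handle_ctrl_vlan()) share one pattern: the command payload that follows the control header is pulled out of the control VQ's read descriptor chain with vringh_iov_pull_iotlb(), and a short read is treated as failure. A minimal sketch of that pattern against <linux/vringh.h> and the virtio-net UAPI definitions; the function name and the device-programming step are placeholders, and struct mlx5_control_vq is the driver-private type seen in the listing:

	/* Sketch only: consume a MAC payload from the control VQ's read chain. */
	static virtio_net_ctrl_ack handle_ctrl_mac_sketch(struct mlx5_control_vq *cvq)
	{
		u8 mac[ETH_ALEN];
		ssize_t read;

		/* cvq->riov was mapped by vringh_getdesc_iotlb() in the kick handler. */
		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN);
		if (read != ETH_ALEN)
			return VIRTIO_NET_ERR;	/* short read: reject the command */

		/* ... program the new MAC into the device here (placeholder) ... */
		return VIRTIO_NET_OK;
	}
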
1832 struct mlx5_control_vq *cvq; in mlx5_cvq_kick_handler() local
1840 cvq = &mvdev->cvq; in mlx5_cvq_kick_handler()
1850 if (!cvq->ready) in mlx5_cvq_kick_handler()
1854 err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head, in mlx5_cvq_kick_handler()
1859 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &ctrl, sizeof(ctrl)); in mlx5_cvq_kick_handler()
1863 cvq->received_desc++; in mlx5_cvq_kick_handler()
1881 write = vringh_iov_push_iotlb(&cvq->vring, &cvq->wiov, &status, sizeof(status)); in mlx5_cvq_kick_handler()
1882 vringh_complete_iotlb(&cvq->vring, cvq->head, write); in mlx5_cvq_kick_handler()
1883 vringh_kiov_cleanup(&cvq->riov); in mlx5_cvq_kick_handler()
1884 vringh_kiov_cleanup(&cvq->wiov); in mlx5_cvq_kick_handler()
1886 if (vringh_need_notify_iotlb(&cvq->vring)) in mlx5_cvq_kick_handler()
1887 vringh_notify(&cvq->vring); in mlx5_cvq_kick_handler()
1889 cvq->completed_desc++; in mlx5_cvq_kick_handler()
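
mlx5_cvq_kick_handler() is the software engine behind all of the above: it fetches the next available descriptor chain, pulls the struct virtio_net_ctrl_hdr, dispatches on its class, pushes the one-byte ack into the device-writable part, then completes the descriptor and notifies the guest if needed. A condensed sketch of that loop, using the cvq fields from the listing; the class dispatch and the driver's locking are elided, so this is an illustration rather than the function verbatim:

	/* Condensed sketch of the control VQ service loop. */
	static void cvq_service_sketch(struct mlx5_control_vq *cvq)
	{
		struct virtio_net_ctrl_hdr ctrl;
		virtio_net_ctrl_ack status;
		ssize_t read, write;
		int err;

		if (!cvq->ready)
			return;

		for (;;) {
			/* Map the next available descriptor chain into riov/wiov. */
			err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov,
						   &cvq->head, GFP_ATOMIC);
			if (err <= 0)		/* 0: ring empty, < 0: error */
				break;

			/* The command header is always first in the read chain. */
			read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov,
						     &ctrl, sizeof(ctrl));
			if (read != sizeof(ctrl))
				break;
			cvq->received_desc++;

			/* Dispatch on ctrl.class (MAC / MQ / VLAN ...); handlers set the ack. */
			status = VIRTIO_NET_ERR;

			/* Return the ack byte and retire the descriptor. */
			write = vringh_iov_push_iotlb(&cvq->vring, &cvq->wiov,
						      &status, sizeof(status));
			vringh_complete_iotlb(&cvq->vring, cvq->head, write);
			vringh_kiov_cleanup(&cvq->riov);
			vringh_kiov_cleanup(&cvq->wiov);

			if (vringh_need_notify_iotlb(&cvq->vring))
				vringh_notify(&cvq->vring);

			cvq->completed_desc++;
		}
	}
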
1908 if (!mvdev->wq || !mvdev->cvq.ready) in mlx5_vdpa_kick_vq()
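
mlx5_vdpa_kick_vq() never services the control VQ in the doorbell path: the guard above bails out unless the workqueue exists and the CVQ is ready, and otherwise the work that runs mlx5_cvq_kick_handler() is queued on mvdev->wq. A sketch of that deferral; cvq_work is assumed to be a work item already initialized to run the kick handler (how the driver allocates and owns it varies by kernel version):

	/* Sketch: a CVQ doorbell only schedules the kick handler. */
	static void kick_cvq_sketch(struct mlx5_vdpa_dev *mvdev, struct work_struct *cvq_work)
	{
		if (!mvdev->wq || !mvdev->cvq.ready)
			return;		/* device being reset or CVQ not enabled yet */

		queue_work(mvdev->wq, cvq_work);
	}
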
1933 mvdev->cvq.desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
1934 mvdev->cvq.device_addr = device_area; in mlx5_vdpa_set_vq_address()
1935 mvdev->cvq.driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
1966 mvdev->cvq.event_cb = *cb; in mlx5_vdpa_set_vq_cb()
1971 struct mlx5_control_vq *cvq = container_of(vring, struct mlx5_control_vq, vring); in mlx5_cvq_notify() local
1973 if (!cvq->event_cb.callback) in mlx5_cvq_notify()
1976 cvq->event_cb.callback(cvq->event_cb.private); in mlx5_cvq_notify()
1981 struct mlx5_control_vq *cvq = &mvdev->cvq; in set_cvq_ready() local
1983 cvq->ready = ready; in set_cvq_ready()
1987 cvq->vring.notify = mlx5_cvq_notify; in set_cvq_ready()
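
set_cvq_ready() and mlx5_cvq_notify() close the loop between vringh and the vdpa core: marking the CVQ ready installs mlx5_cvq_notify() as the vringh notify hook, and that hook simply forwards to the vdpa_callback stored earlier by mlx5_vdpa_set_vq_cb() (line 1966 above). A sketch of the bridge, assuming cvq->event_cb is a struct vdpa_callback as the listing implies:

	/* Sketch: forward vringh's "notify the driver" request to the vdpa callback. */
	static void cvq_notify_sketch(struct vringh *vring)
	{
		struct mlx5_control_vq *cvq = container_of(vring, struct mlx5_control_vq, vring);

		if (!cvq->event_cb.callback)
			return;

		/* Raises the interrupt/eventfd the vdpa bus driver registered for the CVQ. */
		cvq->event_cb.callback(cvq->event_cb.private);
	}

	static void set_cvq_ready_sketch(struct mlx5_control_vq *cvq, bool ready)
	{
		cvq->ready = ready;
		if (!ready)
			return;

		/* Only a ready CVQ gets the notify hook wired up. */
		cvq->vring.notify = cvq_notify_sketch;
	}
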
2023 return mvdev->cvq.ready; in mlx5_vdpa_get_vq_ready()
2039 mvdev->cvq.vring.last_avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2066 state->split.avail_index = mvdev->cvq.vring.last_avail_idx; in mlx5_vdpa_get_vq_state()
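
Because the CVQ is emulated in software, its migratable state is nothing more than vringh's last_avail_idx, which mlx5_vdpa_set_vq_state()/mlx5_vdpa_get_vq_state() map to the split-ring avail_index of struct vdpa_vq_state. A sketch of that mapping (helper names are illustrative):

	/* Sketch: the CVQ state is just the split-ring available index. */
	static void cvq_set_state_sketch(struct mlx5_control_vq *cvq,
					 const struct vdpa_vq_state *state)
	{
		cvq->vring.last_avail_idx = state->split.avail_index;
	}

	static void cvq_get_state_sketch(struct mlx5_control_vq *cvq,
					 struct vdpa_vq_state *state)
	{
		state->split.avail_index = cvq->vring.last_avail_idx;
	}
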
2457 ndev->mvdev.cvq.ready = false; in clear_vqs_ready()
2462 struct mlx5_control_vq *cvq = &mvdev->cvq; in setup_cvq_vring() local
2466 err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, in setup_cvq_vring()
2468 (struct vring_desc *)(uintptr_t)cvq->desc_addr, in setup_cvq_vring()
2469 (struct vring_avail *)(uintptr_t)cvq->driver_addr, in setup_cvq_vring()
2470 (struct vring_used *)(uintptr_t)cvq->device_addr); in setup_cvq_vring()
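
setup_cvq_vring() turns the three guest addresses recorded by mlx5_vdpa_set_vq_address() (lines 1933-1935 above) into a live vringh instance; since the CVQ is never offloaded to hardware, the addresses are cast straight to the split-ring layouts and every access goes through the device IOTLB. A sketch of that call; the features and num_ent parameters stand in for the device's negotiated features and the driver's CVQ depth constant:

	/* Sketch: bind the software control VQ to the guest-provided ring addresses. */
	static int setup_cvq_vring_sketch(struct mlx5_control_vq *cvq, u64 features,
					  unsigned int num_ent)
	{
		/* weak_barriers = false: use full memory barriers around ring accesses. */
		return vringh_init_iotlb(&cvq->vring, features, num_ent, false,
					 (struct vring_desc *)(uintptr_t)cvq->desc_addr,
					 (struct vring_avail *)(uintptr_t)cvq->driver_addr,
					 (struct vring_used *)(uintptr_t)cvq->device_addr);
	}
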
2528 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_reset()
2529 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_reset()
2688 struct mlx5_control_vq *cvq; in mlx5_vdpa_get_vendor_vq_stats() local
2701 cvq = &mvdev->cvq; in mlx5_vdpa_get_vendor_vq_stats()
2702 received_desc = cvq->received_desc; in mlx5_vdpa_get_vendor_vq_stats()
2703 completed_desc = cvq->completed_desc; in mlx5_vdpa_get_vendor_vq_stats()
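
received_desc and completed_desc are the CVQ's vendor statistics: the kick handler bumps received_desc after fetching a command and completed_desc after acking it, mlx5_vdpa_reset() zeroes both (lines 2528-2529), and mlx5_vdpa_get_vendor_vq_stats() snapshots them for the vdpa netlink interface. A sketch of how such a pair of counters could be reported with the generic vendor-stats attributes; treat the attribute usage as an illustration of the standard netlink helpers rather than the driver's exact message layout:

	/* Sketch: emit the two CVQ counters as name/value vendor attributes. */
	static int cvq_fill_stats_sketch(struct sk_buff *msg, u64 received_desc,
					 u64 completed_desc)
	{
		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "received_desc") ||
		    nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      received_desc, VDPA_ATTR_PAD))
			return -EMSGSIZE;

		if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "completed_desc") ||
		    nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
				      completed_desc, VDPA_ATTR_PAD))
			return -EMSGSIZE;

		return 0;
	}
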