Searched refs:cvq (Results 1 – 6 of 6) sorted by relevance
/linux-6.6.21/drivers/vdpa/vdpa_sim/
  vdpa_sim_net.c
    109  struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];   in vdpasim_handle_ctrl_mac() local
    115  read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov,   in vdpasim_handle_ctrl_mac()
    129  struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];   in vdpasim_handle_cvq() local
    140  if (!cvq->ready)   in vdpasim_handle_cvq()
    144  err = vringh_getdesc_iotlb(&cvq->vring, &cvq->in_iov,   in vdpasim_handle_cvq()
    145                             &cvq->out_iov,   in vdpasim_handle_cvq()
    146                             &cvq->head, GFP_ATOMIC);   in vdpasim_handle_cvq()
    151  read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov, &ctrl,   in vdpasim_handle_cvq()
    174  write = vringh_iov_push_iotlb(&cvq->vring, &cvq->out_iov,   in vdpasim_handle_cvq()
    176  vringh_complete_iotlb(&cvq->vring, cvq->head, write);   in vdpasim_handle_cvq()
    [all …]
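The vdpa_sim hits above trace the device-side handling of the control virtqueue: check that the cvq is ready, fetch each descriptor chain with vringh_getdesc_iotlb(), pull the command from the driver-readable iov, push a one-byte ack into the driver-writable iov, and complete the chain. A hedged sketch of that loop follows; struct my_cvq and my_handle_cvq() are illustrative stand-ins for vdpasim_virtqueue and vdpasim_handle_cvq(), while the vringh_* calls are the kernel API the hits show.

    /* Illustrative sketch of the vdpasim_handle_cvq()-style loop above. */
    #include <linux/virtio_net.h>
    #include <linux/vringh.h>

    struct my_cvq {                         /* hypothetical stand-in for vdpasim_virtqueue */
    	struct vringh vring;
    	struct vringh_kiov in_iov;      /* driver-readable buffers (the command) */
    	struct vringh_kiov out_iov;     /* driver-writable buffers (the ack) */
    	u16 head;
    	bool ready;
    };

    static void my_handle_cvq(struct my_cvq *cvq)
    {
    	struct virtio_net_ctrl_hdr ctrl;
    	virtio_net_ctrl_ack status;
    	ssize_t read, write;
    	int err;

    	if (!cvq->ready)
    		return;

    	for (;;) {
    		/* 1 = got a descriptor chain, 0 = ring empty, <0 = error. */
    		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->in_iov,
    					   &cvq->out_iov, &cvq->head,
    					   GFP_ATOMIC);
    		if (err <= 0)
    			break;

    		status = VIRTIO_NET_ERR;
    		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov,
    					     &ctrl, sizeof(ctrl));
    		if (read == sizeof(ctrl))
    			status = VIRTIO_NET_OK;  /* dispatch on ctrl.class/ctrl.cmd here */

    		/* Write the ack and hand the chain back to the driver. */
    		write = vringh_iov_push_iotlb(&cvq->vring, &cvq->out_iov,
    					      &status, sizeof(status));
    		vringh_complete_iotlb(&cvq->vring, cvq->head, write);
    	}
    }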
/linux-6.6.21/drivers/vdpa/mlx5/net/ |
  mlx5_vnet.c
    1801  struct mlx5_control_vq *cvq = &mvdev->cvq;   in handle_ctrl_mac() local
    1810  read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN);   in handle_ctrl_mac()
    1929  struct mlx5_control_vq *cvq = &mvdev->cvq;   in handle_ctrl_mq() local
    1948  read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq));   in handle_ctrl_mq()
    1977  struct mlx5_control_vq *cvq = &mvdev->cvq;   in handle_ctrl_vlan() local
    1987  read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));   in handle_ctrl_vlan()
    1998  read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));   in handle_ctrl_vlan()
    2019  struct mlx5_control_vq *cvq;   in mlx5_cvq_kick_handler() local
    2027  cvq = &mvdev->cvq;   in mlx5_cvq_kick_handler()
    2037  if (!cvq->ready)   in mlx5_cvq_kick_handler()
    [all …]
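The mlx5_vnet.c handlers above all have the same shape: after the generic control header, each pulls its command-specific payload from the driver-readable iov and validates it before acking. Below is a hedged sketch of one such payload handler in the multiqueue (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) style; the function name and argument shape are illustrative, not mlx5's, and only the vringh call and virtio_net definitions are the real API.

    /* Illustrative sketch of a handle_ctrl_*-style payload handler. */
    #include <asm/byteorder.h>
    #include <linux/virtio_net.h>
    #include <linux/vringh.h>

    static virtio_net_ctrl_ack my_handle_ctrl_mq(struct vringh *vring,
    					     struct vringh_kiov *riov)
    {
    	struct virtio_net_ctrl_mq mq;
    	ssize_t read;
    	u16 pairs;

    	/* Pull the command body that follows the virtio_net_ctrl_hdr. */
    	read = vringh_iov_pull_iotlb(vring, riov, &mq, sizeof(mq));
    	if (read != sizeof(mq))
    		return VIRTIO_NET_ERR;

    	/* Assuming a VIRTIO_F_VERSION_1 device, the field is little-endian. */
    	pairs = le16_to_cpu((__force __le16)mq.virtqueue_pairs);
    	if (pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
    	    pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
    		return VIRTIO_NET_ERR;

    	/* ...reconfigure the data virtqueues for "pairs" queue pairs... */
    	return VIRTIO_NET_OK;
    }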
/linux-6.6.21/drivers/vdpa/mlx5/core/ |
  resources.c
    232  mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);   in init_ctrl_vq()
    233  if (!mvdev->cvq.iotlb)   in init_ctrl_vq()
    236  spin_lock_init(&mvdev->cvq.iommu_lock);   in init_ctrl_vq()
    237  vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);   in init_ctrl_vq()
    244  vhost_iotlb_free(mvdev->cvq.iotlb);   in cleanup_ctrl_vq()
  mlx5_vdpa.h
    97   struct mlx5_control_vq cvq;   member
  mr.c
    460  err = vhost_iotlb_add_range(mvdev->cvq.iotlb, start, last, start, VHOST_ACCESS_RW);   in dup_iotlb()
    466  err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, map->last,   in dup_iotlb()
    476  vhost_iotlb_del_range(mvdev->cvq.iotlb, 0, ULLONG_MAX);   in prune_iotlb()
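Taken together, the resources.c and mr.c hits above sketch the lifecycle of the control virtqueue's private software IOTLB: allocate it and hand it to vringh at init, mirror the guest's mappings into it when a memory map arrives, and prune/free it on teardown. The following is a hedged, self-contained sketch of that lifecycle; struct my_ctrl_vq and the my_* helpers are illustrative stand-ins for mlx5_control_vq and its init/dup/prune/cleanup functions, while the vhost_iotlb_* and vringh_set_iotlb() calls are the kernel API shown in the hits.

    /* Illustrative sketch of the resources.c/mr.c cvq IOTLB lifecycle above. */
    #include <linux/errno.h>
    #include <linux/limits.h>
    #include <linux/spinlock.h>
    #include <linux/vhost_iotlb.h>
    #include <linux/vringh.h>
    #include <uapi/linux/vhost_types.h>

    struct my_ctrl_vq {                     /* hypothetical stand-in for mlx5_control_vq */
    	struct vringh vring;
    	struct vhost_iotlb *iotlb;      /* private translations for cvq descriptors */
    	spinlock_t iommu_lock;
    };

    static int my_init_ctrl_vq(struct my_ctrl_vq *cvq)
    {
    	cvq->iotlb = vhost_iotlb_alloc(0, 0);   /* no entry limit, no flags */
    	if (!cvq->iotlb)
    		return -ENOMEM;

    	spin_lock_init(&cvq->iommu_lock);
    	/* From now on vringh translates cvq addresses through this IOTLB. */
    	vringh_set_iotlb(&cvq->vring, cvq->iotlb, &cvq->iommu_lock);
    	return 0;
    }

    /* Copy the guest-supplied mappings into the cvq's private IOTLB. */
    static int my_dup_iotlb(struct my_ctrl_vq *cvq, struct vhost_iotlb *src)
    {
    	struct vhost_iotlb_map *map;
    	int err;

    	if (!src)       /* no guest IOTLB: identity-map the whole space */
    		return vhost_iotlb_add_range(cvq->iotlb, 0, ULLONG_MAX, 0,
    					     VHOST_ACCESS_RW);

    	for (map = vhost_iotlb_itree_first(src, 0, ULLONG_MAX); map;
    	     map = vhost_iotlb_itree_next(map, 0, ULLONG_MAX)) {
    		err = vhost_iotlb_add_range(cvq->iotlb, map->start, map->last,
    					    map->addr, VHOST_ACCESS_RW);
    		if (err)
    			return err;
    	}
    	return 0;
    }

    static void my_prune_iotlb(struct my_ctrl_vq *cvq)
    {
    	vhost_iotlb_del_range(cvq->iotlb, 0, ULLONG_MAX);
    }

    static void my_cleanup_ctrl_vq(struct my_ctrl_vq *cvq)
    {
    	vhost_iotlb_free(cvq->iotlb);
    }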
/linux-6.6.21/drivers/net/ |
  virtio_net.c
    239   struct virtqueue *cvq;   member
    2491  ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);   in virtnet_send_command()
    2498  if (unlikely(!virtqueue_kick(vi->cvq)))   in virtnet_send_command()
    2504  while (!virtqueue_get_buf(vi->cvq, &tmp) &&   in virtnet_send_command()
    2505         !virtqueue_is_broken(vi->cvq))   in virtnet_send_command()
    4154  vi->cvq = vqs[total_vqs - 1];   in virtnet_find_vqs()
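On the guest driver side, the virtio_net.c hits show the other half of the protocol: virtnet_send_command() queues the command header and payload as out-sgs plus a one-byte ack as an in-sg, kicks the cvq, and busy-waits for the device to complete. A hedged sketch of that flow follows; the helper name, struct, and allocation strategy are illustrative (virtio-net itself keeps these buffers in vi->ctrl), while the virtqueue_* calls are the real virtio core API.

    /* Illustrative sketch of the virtnet_send_command()-style flow above. */
    #include <linux/processor.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>
    #include <linux/virtio.h>
    #include <linux/virtio_net.h>

    struct my_cvq_cmd {                     /* hypothetical command buffer */
    	struct virtio_net_ctrl_hdr hdr;
    	virtio_net_ctrl_ack status;
    };

    static bool my_send_cvq_cmd(struct virtqueue *cvq, u8 class, u8 cmd,
    			    struct scatterlist *data_sg)
    {
    	struct scatterlist hdr_sg, stat_sg, *sgs[3];
    	struct my_cvq_cmd *c;
    	unsigned int out_num = 0, tmp;
    	bool ok = false;

    	/* The buffers land in a scatterlist, so they must not live on a
    	 * (possibly vmapped) stack; allocate them instead. */
    	c = kmalloc(sizeof(*c), GFP_KERNEL);
    	if (!c)
    		return false;
    	c->hdr.class = class;
    	c->hdr.cmd = cmd;
    	c->status = VIRTIO_NET_ERR;

    	/* Driver-to-device buffers first (header, optional payload)... */
    	sg_init_one(&hdr_sg, &c->hdr, sizeof(c->hdr));
    	sgs[out_num++] = &hdr_sg;
    	if (data_sg)
    		sgs[out_num++] = data_sg;
    	/* ...then the device-to-driver ack byte. */
    	sg_init_one(&stat_sg, &c->status, sizeof(c->status));
    	sgs[out_num] = &stat_sg;

    	if (virtqueue_add_sgs(cvq, sgs, out_num, 1, c, GFP_ATOMIC) < 0)
    		goto out;
    	if (virtqueue_kick(cvq)) {
    		/* Busy-wait for the device, as virtnet_send_command() does. */
    		while (!virtqueue_get_buf(cvq, &tmp) &&
    		       !virtqueue_is_broken(cvq))
    			cpu_relax();
    	}
    	ok = (c->status == VIRTIO_NET_OK);
    out:
    	kfree(c);
    	return ok;
    }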