Lines matching refs:max_vqs

223 return mvdev->max_vqs; in ctrl_vq_idx()
1280 for (i = 0; i < ndev->mvdev.max_vqs; i++) in suspend_vqs()
2182 for (i = 0; i < mvdev->max_vqs; i++) { in setup_virtqueues()
2202 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
2216 mvdev->max_idx = mvdev->max_vqs; in update_cvq_info()
2313 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
2324 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
2336 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
2454 for (i = 0; i < ndev->mvdev.max_vqs; i++) in clear_vqs_ready()
2530 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_reset()
2825 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in init_mvqs()
2832 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
2953 u32 max_vqs; in mlx5_vdpa_dev_add() local
2967 max_vqs = min_t(int, MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues), in mlx5_vdpa_dev_add()
2969 if (max_vqs < 2) { in mlx5_vdpa_dev_add()
2972 max_vqs); in mlx5_vdpa_dev_add()
2977 if (add_config->net.max_vq_pairs > max_vqs / 2) in mlx5_vdpa_dev_add()
2979 max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs); in mlx5_vdpa_dev_add()
2981 max_vqs = 2; in mlx5_vdpa_dev_add()
2990 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
2994 ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); in mlx5_vdpa_dev_add()
2995 ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3039 config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2); in mlx5_vdpa_dev_add()
3066 err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1); in mlx5_vdpa_dev_add()
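Read together, the hits above trace one sizing pattern through the driver (the function names correspond to drivers/vdpa/mlx5/net/mlx5_vnet.c): max_vqs counts the data virtqueues, the per-VQ arrays are allocated from it (kcalloc(max_vqs, ...) and kcalloc(max_vqs + 1, ...) at lines 2994-2995), the control virtqueue takes the extra slot at index max_vqs (line 223), and the device registers max_vqs + 1 queues (line 3066). The sketch below is only a minimal, self-contained model of that layout; every type and function in it (toy_vdpa_dev, toy_vq, toy_cb, toy_dev_add, toy_dev_reset) is a hypothetical stand-in, not the driver's own code.

/*
 * Minimal model of the max_vqs sizing pattern seen in the listing above.
 * All types and helpers here are hypothetical stand-ins, not the
 * mlx5_vnet.c definitions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_cb {
	void (*callback)(void *priv);
	void *priv;
};

struct toy_vq {
	unsigned int index;
	int ready;
};

struct toy_vdpa_dev {
	unsigned int max_vqs;     /* number of data virtqueues */
	struct toy_vq *vqs;       /* max_vqs entries, cf. kcalloc(max_vqs, ...) */
	struct toy_cb *event_cbs; /* max_vqs + 1 entries: data VQs + control VQ */
};

/* Cf. line 223: ctrl_vq_idx() returns mvdev->max_vqs, i.e. the control VQ
 * sits one past the data virtqueues. */
static unsigned int ctrl_vq_idx(const struct toy_vdpa_dev *dev)
{
	return dev->max_vqs;
}

static int toy_dev_add(struct toy_vdpa_dev *dev, unsigned int max_vqs)
{
	unsigned int i;

	dev->max_vqs = max_vqs;
	/* One slot per data VQ ... */
	dev->vqs = calloc(max_vqs, sizeof(*dev->vqs));
	/* ... and one extra callback slot for the control VQ. */
	dev->event_cbs = calloc(max_vqs + 1, sizeof(*dev->event_cbs));
	if (!dev->vqs || !dev->event_cbs) {
		free(dev->vqs);
		free(dev->event_cbs);
		return -1;
	}

	for (i = 0; i < dev->max_vqs; i++)
		dev->vqs[i].index = i;

	return 0;
}

/* Cf. lines 2454 and 2530: the reset path clears per-VQ state for the data
 * VQs and wipes all max_vqs + 1 callback slots. */
static void toy_dev_reset(struct toy_vdpa_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->max_vqs; i++)
		dev->vqs[i].ready = 0;
	memset(dev->event_cbs, 0,
	       sizeof(*dev->event_cbs) * (dev->max_vqs + 1));
}

int main(void)
{
	struct toy_vdpa_dev dev = { 0 };

	if (toy_dev_add(&dev, 4))
		return 1;

	printf("data VQs: %u, control VQ index: %u, total queues: %u\n",
	       dev.max_vqs, ctrl_vq_idx(&dev), dev.max_vqs + 1);

	toy_dev_reset(&dev);
	free(dev.vqs);
	free(dev.event_cbs);
	return 0;
}

The max_vqs + 1 sizing recurs at lines 2530, 2995 and 3066 for the same reason: the registered vDPA device exposes the data queues plus the control queue, while the loops that walk only the data virtqueues (suspend_vqs, setup_virtqueues, clear_vqs_ready, init_mvqs) stop at max_vqs.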