Lines matching refs: container
80 static bool vfio_iommu_driver_allowed(struct vfio_container *container, in vfio_iommu_driver_allowed() argument
85 return container->noiommu == (driver->ops == &vfio_noiommu_ops); in vfio_iommu_driver_allowed()
148 struct vfio_container *container; in vfio_container_release() local
149 container = container_of(kref, struct vfio_container, kref); in vfio_container_release()
151 kfree(container); in vfio_container_release()
154 static void vfio_container_get(struct vfio_container *container) in vfio_container_get() argument
156 kref_get(&container->kref); in vfio_container_get()
159 static void vfio_container_put(struct vfio_container *container) in vfio_container_put() argument
161 kref_put(&container->kref, vfio_container_release); in vfio_container_put()
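
Lines 148-161 above are the container's reference-counting lifecycle. Below is a minimal sketch of that kref pattern, reconstructed only from the listed fragments; the real struct vfio_container also carries group_list, group_lock, iommu_driver, iommu_data and noiommu, which are elided here.

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Sketch: only the refcount member of the real structure is shown. */
struct vfio_container {
        struct kref kref;
        /* group_list, group_lock, iommu_driver, iommu_data, noiommu, ... */
};

/* kref release callback: runs when the last reference is dropped (line 148). */
static void vfio_container_release(struct kref *kref)
{
        struct vfio_container *container =
                container_of(kref, struct vfio_container, kref);

        kfree(container);
}

static void vfio_container_get(struct vfio_container *container)
{
        kref_get(&container->kref);
}

static void vfio_container_put(struct vfio_container *container)
{
        kref_put(&container->kref, vfio_container_release);
}

The last vfio_container_put() frees the object, which is why vfio_group_detach_container() (line 500 below) drops its reference only after the group has been unlinked.
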
167 device->group->container->iommu_driver; in vfio_device_container_register()
171 device->group->container->iommu_data, device); in vfio_device_container_register()
177 device->group->container->iommu_driver; in vfio_device_container_unregister()
181 device->group->container->iommu_data, device); in vfio_device_container_unregister()
185 vfio_container_ioctl_check_extension(struct vfio_container *container, in vfio_container_ioctl_check_extension() argument
191 down_read(&container->group_lock); in vfio_container_ioctl_check_extension()
193 driver = container->iommu_driver; in vfio_container_ioctl_check_extension()
209 if (!list_empty(&container->group_list) && in vfio_container_ioctl_check_extension()
210 !vfio_iommu_driver_allowed(container, in vfio_container_ioctl_check_extension()
225 ret = driver->ops->ioctl(container->iommu_data, in vfio_container_ioctl_check_extension()
229 up_read(&container->group_lock); in vfio_container_ioctl_check_extension()
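
Lines 185-229 show how a VFIO_CHECK_EXTENSION query is answered under a read-locked group_lock. The sketch below assumes the VFIO-internal types from the same file and only summarizes, in a comment, the walk over registered drivers that lines 209-210 hint at; it is the shape of the dispatch, not the full function.

static int
vfio_container_ioctl_check_extension(struct vfio_container *container,
                                     unsigned long arg)
{
        struct vfio_iommu_driver *driver;
        int ret = 0;

        down_read(&container->group_lock);
        driver = container->iommu_driver;

        if (!driver) {
                /*
                 * No backend bound yet: the code around lines 209-210
                 * consults the registered IOMMU drivers instead, skipping
                 * any that vfio_iommu_driver_allowed() rejects for this
                 * container (no-IOMMU vs. IOMMU-backed mismatch).
                 */
        } else {
                /* Bound backend: forward the query to it (line 225). */
                ret = driver->ops->ioctl(container->iommu_data,
                                         VFIO_CHECK_EXTENSION, arg);
        }

        up_read(&container->group_lock);
        return ret;
}
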
235 static int __vfio_container_attach_groups(struct vfio_container *container, in __vfio_container_attach_groups() argument
242 list_for_each_entry(group, &container->group_list, container_next) { in __vfio_container_attach_groups()
252 list_for_each_entry_continue_reverse(group, &container->group_list, in __vfio_container_attach_groups()
260 static long vfio_ioctl_set_iommu(struct vfio_container *container, in vfio_ioctl_set_iommu() argument
266 down_write(&container->group_lock); in vfio_ioctl_set_iommu()
276 if (list_empty(&container->group_list) || container->iommu_driver) { in vfio_ioctl_set_iommu()
277 up_write(&container->group_lock); in vfio_ioctl_set_iommu()
285 if (!vfio_iommu_driver_allowed(container, driver)) in vfio_ioctl_set_iommu()
309 ret = __vfio_container_attach_groups(container, driver, data); in vfio_ioctl_set_iommu()
316 container->iommu_driver = driver; in vfio_ioctl_set_iommu()
317 container->iommu_data = data; in vfio_ioctl_set_iommu()
322 up_write(&container->group_lock); in vfio_ioctl_set_iommu()
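
Lines 260-322 outline the VFIO_SET_IOMMU path: with group_lock held for write, the request is rejected unless at least one group is attached and no backend is bound yet (line 276), the candidate driver must pass vfio_iommu_driver_allowed() (line 285), and only after __vfio_container_attach_groups() succeeds (line 309) are iommu_driver and iommu_data published (lines 316-317). The sketch below captures that sequence for a single candidate: vfio_container_bind_driver() is a hypothetical helper invented for this sketch, the real function additionally walks the registered drivers, takes a module reference and matches the requested extension, and the error codes not visible in the listing are assumptions.

/* Hypothetical helper, for illustration only: bind one candidate backend. */
static long vfio_container_bind_driver(struct vfio_container *container,
                                       struct vfio_iommu_driver *driver,
                                       unsigned long arg)
{
        void *data;
        long ret;

        lockdep_assert_held_write(&container->group_lock);

        /* Needs attached groups and no backend already bound (line 276). */
        if (list_empty(&container->group_list) || container->iommu_driver)
                return -EINVAL;

        /* Reject a no-IOMMU/IOMMU mismatch for this container (line 285). */
        if (!vfio_iommu_driver_allowed(container, driver))
                return -EINVAL;

        /* Let the backend build its per-container state. */
        data = driver->ops->open(arg);
        if (IS_ERR(data))
                return PTR_ERR(data);

        /* Attach every already-present group to the new backend (line 309). */
        ret = __vfio_container_attach_groups(container, driver, data);
        if (ret) {
                driver->ops->release(data);
                return ret;
        }

        /* Publish the backend only once everything succeeded (316-317). */
        container->iommu_driver = driver;
        container->iommu_data = data;
        return 0;
}
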
330 struct vfio_container *container = filep->private_data; in vfio_fops_unl_ioctl() local
335 if (!container) in vfio_fops_unl_ioctl()
343 ret = vfio_container_ioctl_check_extension(container, arg); in vfio_fops_unl_ioctl()
346 ret = vfio_ioctl_set_iommu(container, arg); in vfio_fops_unl_ioctl()
349 driver = container->iommu_driver; in vfio_fops_unl_ioctl()
350 data = container->iommu_data; in vfio_fops_unl_ioctl()
361 struct vfio_container *container; in vfio_fops_open() local
363 container = kzalloc(sizeof(*container), GFP_KERNEL_ACCOUNT); in vfio_fops_open()
364 if (!container) in vfio_fops_open()
367 INIT_LIST_HEAD(&container->group_list); in vfio_fops_open()
368 init_rwsem(&container->group_lock); in vfio_fops_open()
369 kref_init(&container->kref); in vfio_fops_open()
371 filep->private_data = container; in vfio_fops_open()
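
Lines 361-371 give nearly all of the container's open handler: allocate an accounted, zeroed container, initialize its group list, rwsem and refcount, and stash it in filep->private_data so the later fops can find it. A sketch assembled from those fragments; the -ENOMEM return is assumed, since the failure branch at line 364 is truncated in the listing.

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
        struct vfio_container *container;

        /* Accounted allocation charged to the opener (line 363). */
        container = kzalloc(sizeof(*container), GFP_KERNEL_ACCOUNT);
        if (!container)
                return -ENOMEM;          /* assumed error value */

        INIT_LIST_HEAD(&container->group_list);
        init_rwsem(&container->group_lock);
        kref_init(&container->kref);

        /* Every later fop resolves the container from private_data. */
        filep->private_data = container;
        return 0;
}
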
378 struct vfio_container *container = filep->private_data; in vfio_fops_release() local
382 vfio_container_put(container); in vfio_fops_release()
397 struct vfio_container *container; in vfio_container_from_file() local
403 container = file->private_data; in vfio_container_from_file()
404 WARN_ON(!container); /* fget ensures we don't race vfio_release */ in vfio_container_from_file()
405 return container; in vfio_container_from_file()
416 int vfio_container_attach_group(struct vfio_container *container, in vfio_container_attach_group() argument
427 down_write(&container->group_lock); in vfio_container_attach_group()
430 if (!list_empty(&container->group_list) && in vfio_container_attach_group()
431 container->noiommu != (group->type == VFIO_NO_IOMMU)) { in vfio_container_attach_group()
442 driver = container->iommu_driver; in vfio_container_attach_group()
444 ret = driver->ops->attach_group(container->iommu_data, in vfio_container_attach_group()
455 group->container = container; in vfio_container_attach_group()
457 container->noiommu = (group->type == VFIO_NO_IOMMU); in vfio_container_attach_group()
458 list_add(&group->container_next, &container->group_list); in vfio_container_attach_group()
461 vfio_container_get(container); in vfio_container_attach_group()
464 up_write(&container->group_lock); in vfio_container_attach_group()
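
Lines 416-464 show the attach path: under a write-locked group_lock the code refuses to mix no-IOMMU and IOMMU-backed groups in one container (lines 430-431), attaches the group to an already-bound backend if there is one (line 444), records the group on group_list and takes a container reference (line 461). A sketch of that flow; the -EPERM value and other bookkeeping (module references, user counters) are not visible in the listing and are assumptions or elided.

int vfio_container_attach_group(struct vfio_container *container,
                                struct vfio_group *group)
{
        struct vfio_iommu_driver *driver;
        int ret = 0;

        down_write(&container->group_lock);

        /* No-IOMMU and IOMMU-backed groups cannot share a container. */
        if (!list_empty(&container->group_list) &&
            container->noiommu != (group->type == VFIO_NO_IOMMU)) {
                ret = -EPERM;            /* assumed error value */
                goto out_unlock;
        }

        /* If a backend is already bound, attach the new group to it. */
        driver = container->iommu_driver;
        if (driver) {
                ret = driver->ops->attach_group(container->iommu_data,
                                                group->iommu_group,
                                                group->type);
                if (ret)
                        goto out_unlock;
        }

        group->container = container;
        container->noiommu = (group->type == VFIO_NO_IOMMU);
        list_add(&group->container_next, &container->group_list);

        /* The group now pins the container (released on detach, line 500). */
        vfio_container_get(container);

out_unlock:
        up_write(&container->group_lock);
        return ret;
}
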
470 struct vfio_container *container = group->container; in vfio_group_detach_container() local
476 down_write(&container->group_lock); in vfio_group_detach_container()
478 driver = container->iommu_driver; in vfio_group_detach_container()
480 driver->ops->detach_group(container->iommu_data, in vfio_group_detach_container()
486 group->container = NULL; in vfio_group_detach_container()
491 if (driver && list_empty(&container->group_list)) { in vfio_group_detach_container()
492 driver->ops->release(container->iommu_data); in vfio_group_detach_container()
494 container->iommu_driver = NULL; in vfio_group_detach_container()
495 container->iommu_data = NULL; in vfio_group_detach_container()
498 up_write(&container->group_lock); in vfio_group_detach_container()
500 vfio_container_put(container); in vfio_group_detach_container()
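
Lines 470-500 are the reverse path: detach the group from the backend, unlink it, and, once the last group is gone, release the backend's iommu_data and clear the driver pointers before dropping the container reference taken at attach time. A sketch of that teardown; the list_del() and the module_put() on the driver owner are not visible in the listing and are assumptions.

void vfio_group_detach_container(struct vfio_group *group)
{
        struct vfio_container *container = group->container;
        struct vfio_iommu_driver *driver;

        down_write(&container->group_lock);

        driver = container->iommu_driver;
        if (driver)
                driver->ops->detach_group(container->iommu_data,
                                          group->iommu_group);

        group->container = NULL;
        list_del(&group->container_next);        /* assumed, not in listing */

        /* Last group gone: tear the backend down too (lines 491-495). */
        if (driver && list_empty(&container->group_list)) {
                driver->ops->release(container->iommu_data);
                container->iommu_driver = NULL;
                container->iommu_data = NULL;
        }

        up_write(&container->group_lock);

        /* Drop the reference taken by vfio_container_attach_group(). */
        vfio_container_put(container);
}
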
511 if (!group->container->iommu_driver) in vfio_group_use_container()
535 struct vfio_container *container = device->group->container; in vfio_device_container_pin_pages() local
537 struct vfio_iommu_driver *driver = container->iommu_driver; in vfio_device_container_pin_pages()
544 return driver->ops->pin_pages(container->iommu_data, iommu_group, iova, in vfio_device_container_pin_pages()
551 struct vfio_container *container = device->group->container; in vfio_device_container_unpin_pages() local
556 container->iommu_driver->ops->unpin_pages(container->iommu_data, iova, in vfio_device_container_unpin_pages()
564 struct vfio_container *container = device->group->container; in vfio_device_container_dma_rw() local
565 struct vfio_iommu_driver *driver = container->iommu_driver; in vfio_device_container_dma_rw()
569 return driver->ops->dma_rw(container->iommu_data, iova, data, len, in vfio_device_container_dma_rw()
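
The remaining fragments (lines 511-569) all share one pattern: resolve the container through device->group->container and dispatch to the bound backend via driver->ops, passing container->iommu_data as the opaque handle. A sketch of that dispatch for the dma_rw case; the -ENOTTY fallback for a missing backend or callback is an assumption, since the guard preceding line 569 is not part of the listing.

int vfio_device_container_dma_rw(struct vfio_device *device,
                                 dma_addr_t iova, void *data,
                                 size_t len, bool write)
{
        struct vfio_container *container = device->group->container;
        struct vfio_iommu_driver *driver = container->iommu_driver;

        /* Without a bound backend nothing can translate the IOVA. */
        if (unlikely(!driver || !driver->ops->dma_rw))
                return -ENOTTY;          /* assumed error value */

        return driver->ops->dma_rw(container->iommu_data, iova, data, len,
                                   write);
}

The pin_pages and unpin_pages fragments (lines 535-556) dispatch the same way, only with the iommu_group, the IOVA range and the page array as arguments.
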