Lines Matching refs:umem_odp

230 	struct ib_umem_odp *umem_odp = … in mlx5_ib_invalidate_range() local
245 mutex_lock(&umem_odp->umem_mutex); in mlx5_ib_invalidate_range()
251 if (!umem_odp->npages) in mlx5_ib_invalidate_range()
253 mr = umem_odp->private; in mlx5_ib_invalidate_range()
255 start = max_t(u64, ib_umem_start(umem_odp), range->start); in mlx5_ib_invalidate_range()
256 end = min_t(u64, ib_umem_end(umem_odp), range->end); in mlx5_ib_invalidate_range()
264 for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) { in mlx5_ib_invalidate_range()
265 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; in mlx5_ib_invalidate_range()
272 if (umem_odp->dma_list[idx] & in mlx5_ib_invalidate_range()
307 ib_umem_odp_unmap_dma_pages(umem_odp, start, end); in mlx5_ib_invalidate_range()
309 if (unlikely(!umem_odp->npages && mr->parent)) in mlx5_ib_invalidate_range()
312 mutex_unlock(&umem_odp->umem_mutex); in mlx5_ib_invalidate_range()
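
The matches above all fall in mlx5_ib_invalidate_range(), the mmu-notifier callback of the mlx5 on-demand-paging (ODP) code: it takes umem_odp->umem_mutex, returns early when no pages are mapped, clips the notifier range to the umem with ib_umem_start()/ib_umem_end(), walks the clipped range in BIT(page_shift) steps consulting dma_list[], unmaps with ib_umem_odp_unmap_dma_pages(), and (per the mr->parent test) tears down a child of an implicit MR once its last page is gone. A condensed sketch of that locking and clipping pattern, assuming the in-kernel ib_umem_odp API; the function name and the elided per-page bookkeeping are illustrative, not the driver's verbatim code:

#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <rdma/ib_umem_odp.h>

/* Sketch only: clip the notifier range, walk it, unmap, all under umem_mutex. */
static void invalidate_range_sketch(struct ib_umem_odp *umem_odp,
				    const struct mmu_notifier_range *range)
{
	u64 start, end, addr;

	mutex_lock(&umem_odp->umem_mutex);
	if (!umem_odp->npages)
		goto out;

	/* Only the overlap of the notifier range and this umem matters. */
	start = max_t(u64, ib_umem_start(umem_odp), range->start);
	end = min_t(u64, ib_umem_end(umem_odp), range->end);

	for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
		size_t idx = (addr - ib_umem_start(umem_odp)) >>
			     umem_odp->page_shift;
		/* Per-page state is kept in umem_odp->dma_list[idx] ... */
		(void)idx;
	}

	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
out:
	mutex_unlock(&umem_odp->umem_mutex);
}

Everything, including the final unmap, stays under umem_mutex, which is what lets the page-fault path take the same mutex to serialize against invalidation.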
485 struct ib_umem_odp *umem_odp; in mlx5_ib_alloc_implicit_mr() local
492 umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags); in mlx5_ib_alloc_implicit_mr()
493 if (IS_ERR(umem_odp)) in mlx5_ib_alloc_implicit_mr()
494 return ERR_CAST(umem_odp); in mlx5_ib_alloc_implicit_mr()
500 ib_umem_odp_release(umem_odp); in mlx5_ib_alloc_implicit_mr()
507 imr->umem = &umem_odp->umem; in mlx5_ib_alloc_implicit_mr()
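
mlx5_ib_alloc_implicit_mr() shows the allocation side: ib_umem_odp_alloc_implicit() builds the parent umem, an allocation error is propagated with ERR_CAST(), any later failure releases the umem via ib_umem_odp_release(), and on success the MR is pointed at the struct ib_umem embedded in the umem_odp. A minimal sketch of that error-handling shape, assuming only the ib_umem_odp/ib_verbs API; the MR struct and the kzalloc() stand in for the driver's real (cache-backed) MR allocation:

#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>

struct implicit_mr_sketch {		/* stand-in for the driver's MR type */
	struct ib_mr ibmr;
	struct ib_umem *umem;
};

static struct ib_mr *alloc_implicit_sketch(struct ib_device *ibdev,
					   int access_flags)
{
	struct ib_umem_odp *umem_odp;
	struct implicit_mr_sketch *imr;

	umem_odp = ib_umem_odp_alloc_implicit(ibdev, access_flags);
	if (IS_ERR(umem_odp))
		return ERR_CAST(umem_odp);	/* re-type the error pointer */

	imr = kzalloc(sizeof(*imr), GFP_KERNEL);
	if (!imr) {
		/* Any failure after the alloc must release the umem. */
		ib_umem_odp_release(umem_odp);
		return ERR_PTR(-ENOMEM);
	}

	imr->umem = &umem_odp->umem;	/* ib_umem is embedded in ib_umem_odp */
	return &imr->ibmr;
}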
623 struct ib_umem_odp *umem_odp; in pagefault_implicit_mr() local
643 umem_odp = to_ib_umem_odp(mtt->umem); in pagefault_implicit_mr()
644 len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) - in pagefault_implicit_mr()
647 ret = pagefault_real_mr(mtt, umem_odp, user_va, len, in pagefault_implicit_mr()
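
In pagefault_implicit_mr(), the child MTT's umem is recovered with to_ib_umem_odp(mtt->umem) and the byte count to fault is clamped so it never runs past that umem's end before pagefault_real_mr() is called. The clamp as a tiny helper (hypothetical name, same arithmetic as the match above):

#include <linux/kernel.h>
#include <rdma/ib_umem_odp.h>

/* Hypothetical helper: keep a fault inside the bounds of one umem. */
static u64 clamp_fault_len_sketch(struct ib_umem_odp *umem_odp,
				  u64 user_va, u64 bcnt)
{
	return min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) - user_va;
}

In the driver this runs inside a loop over the implicit MR's children, so a request that spans two children is faulted in pieces, one child per iteration.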