Searched refs:ib_umem_start (Results 1 – 3 of 3) sorted by relevance
273  ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp), in ib_umem_odp_release()
361  if (user_virt < ib_umem_start(umem_odp) || in ib_umem_odp_map_dma_and_lock()
381  pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT; in ib_umem_odp_map_dma_and_lock()
406  start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift; in ib_umem_odp_map_dma_and_lock()
482  virt = max_t(u64, virt, ib_umem_start(umem_odp)); in ib_umem_odp_unmap_dma_pages()
485  idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; in ib_umem_odp_unmap_dma_pages()
490  unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT; in ib_umem_odp_unmap_dma_pages()
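
The hits above show the core ODP map/unmap paths turning an address into an index relative to the start of the umem. Below is a minimal, self-contained user-space sketch of that arithmetic; the struct and helper names are hypothetical stand-ins for struct ib_umem_odp, and the 4 KiB base-page size is an assumption, but the two shifts mirror hit lines 485 and 490 (per-umem-page index vs. per-base-page pfn index).

/*
 * User-space model of the index math in the hits above. "odp_range" and
 * its fields are hypothetical stand-ins for struct ib_umem_odp; only the
 * arithmetic mirrors the hit lines.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12UL            /* assumption: 4 KiB base pages */

struct odp_range {                       /* hypothetical stand-in */
	uint64_t start;                  /* what ib_umem_start() would return */
	unsigned int page_shift;         /* like umem_odp->page_shift */
};

/* Index of the umem page covering addr, as on hit line 485. */
static uint64_t dma_page_index(const struct odp_range *r, uint64_t addr)
{
	return (addr - r->start) >> r->page_shift;
}

/* Index into a pfn array kept in base-page units, as on hit line 490. */
static uint64_t pfn_index(const struct odp_range *r, uint64_t addr)
{
	return (addr - r->start) >> MODEL_PAGE_SHIFT;
}

int main(void)
{
	struct odp_range r = { .start = 0x7f0000000000ULL, .page_shift = 16 };
	uint64_t addr = r.start + (3ULL << 16);   /* third 64 KiB umem page */

	printf("dma idx %llu, pfn idx %llu\n",
	       (unsigned long long)dma_page_index(&r, addr),
	       (unsigned long long)pfn_index(&r, addr));
	return 0;
}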
53  static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp) in ib_umem_start() function
66  return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >> in ib_umem_odp_num_pages()
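
These header hits give the signature of ib_umem_start() and the page-count calculation in ib_umem_odp_num_pages(). The sketch below models that relationship with hypothetical names; only the formula num_pages = (end - start) >> page_shift is taken from the hit on line 66.

/*
 * User-space model of the two helpers hit in the header. The struct and
 * field names are hypothetical; the relationship between start, end and
 * page count follows hit line 66.
 */
#include <stddef.h>
#include <stdint.h>
#include <assert.h>

struct odp_model {                        /* hypothetical stand-in */
	uint64_t start;                   /* first mapped byte */
	uint64_t end;                     /* one past the last mapped byte */
	unsigned int page_shift;
};

static inline uint64_t model_umem_start(const struct odp_model *m)
{
	return m->start;
}

static inline uint64_t model_umem_end(const struct odp_model *m)
{
	return m->end;
}

/* Mirrors ib_umem_odp_num_pages(): mapped span divided by the umem page size. */
static inline size_t model_num_pages(const struct odp_model *m)
{
	return (model_umem_end(m) - model_umem_start(m)) >> m->page_shift;
}

int main(void)
{
	struct odp_model m = { .start = 0x1000, .end = 0x9000, .page_shift = 12 };

	assert(model_num_pages(&m) == 8);  /* 0x8000 bytes / 4 KiB pages */
	return 0;
}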
202  ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0, in free_implicit_child_mr_work()
213  unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT; in destroy_unused_implicit_child_mr()
255  start = max_t(u64, ib_umem_start(umem_odp), range->start); in mlx5_ib_invalidate_range()
265  idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; in mlx5_ib_invalidate_range()
568  start_idx = (user_va - ib_umem_start(odp)) >> page_shift; in pagefault_real_mr()
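
The mlx5 hits combine two uses: clamping an invalidation range to the umem bounds before computing per-page indexes (lines 255 and 265), and locating an implicit MR's child by shifting the umem start with MLX5_IMR_MTT_SHIFT (lines 202 and 213). A hedged sketch of both patterns follows; all model_* names are made up and the 1 GiB-per-child shift is an assumption, not read from the driver.

/*
 * Sketch of the two mlx5 patterns above: clamp an invalidation range to
 * the umem, then derive indexes from ib_umem_start(). Names and the
 * child-MR granularity are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_IMR_MTT_SHIFT 30            /* assumption: 1 GiB per child MR */

struct model_odp {                        /* hypothetical stand-in */
	uint64_t start;                   /* what ib_umem_start() would return */
	uint64_t end;                     /* what ib_umem_end() would return */
	unsigned int page_shift;
};

static inline uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static inline uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	struct model_odp odp = {
		.start = 4ULL << MODEL_IMR_MTT_SHIFT,
		.end   = 5ULL << MODEL_IMR_MTT_SHIFT,
		.page_shift = 12,
	};
	/* Hypothetical notifier range that overhangs the umem on both sides. */
	uint64_t range_start = odp.start - 0x1000;
	uint64_t range_end   = odp.end + 0x1000;

	/* Clamp to the umem, as on hit line 255 (and the matching min for end). */
	uint64_t start = max_u64(odp.start, range_start);
	uint64_t end   = min_u64(odp.end, range_end);

	/* Per-page index within the umem, as on hit line 265. */
	uint64_t first_idx = (start - odp.start) >> odp.page_shift;
	uint64_t last_idx  = (end - odp.start) >> odp.page_shift;

	/* Which child of an implicit MR this umem backs, as on hit line 213. */
	uint64_t child_idx = odp.start >> MODEL_IMR_MTT_SHIFT;

	printf("pages %llu..%llu, child %llu\n",
	       (unsigned long long)first_idx,
	       (unsigned long long)last_idx,
	       (unsigned long long)child_idx);
	return 0;
}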