
Searched refs:umem (Results 1 – 25 of 94) sorted by relevance


/linux-6.1.9/net/xdp/
xdp_umem.c
24 static void xdp_umem_unpin_pages(struct xdp_umem *umem) in xdp_umem_unpin_pages() argument
26 unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true); in xdp_umem_unpin_pages()
28 kvfree(umem->pgs); in xdp_umem_unpin_pages()
29 umem->pgs = NULL; in xdp_umem_unpin_pages()
32 static void xdp_umem_unaccount_pages(struct xdp_umem *umem) in xdp_umem_unaccount_pages() argument
34 if (umem->user) { in xdp_umem_unaccount_pages()
35 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
36 free_uid(umem->user); in xdp_umem_unaccount_pages()
40 static void xdp_umem_addr_unmap(struct xdp_umem *umem) in xdp_umem_addr_unmap() argument
42 vunmap(umem->addrs); in xdp_umem_addr_unmap()
[all …]
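
Note: the pgs/npgs pair torn down in xdp_umem_unpin_pages() above is produced earlier by the pin_user_pages() family. A minimal sketch of that pin/unpin lifecycle, with hypothetical demo_* helpers rather than the kernel's actual xdp_umem_pin_pages():

#include <linux/errno.h>
#include <linux/mm.h>	/* pin_user_pages_fast(), unpin_user_pages_dirty_lock() */
#include <linux/slab.h>	/* kvcalloc(), kvfree() */

/* Hypothetical helper: pin npgs user pages starting at addr into *pgs_out. */
static int demo_pin_pages(unsigned long addr, u32 npgs, struct page ***pgs_out)
{
	struct page **pgs;
	long got;

	pgs = kvcalloc(npgs, sizeof(*pgs), GFP_KERNEL);
	if (!pgs)
		return -ENOMEM;

	got = pin_user_pages_fast(addr, npgs, FOLL_WRITE | FOLL_LONGTERM, pgs);
	if (got != npgs) {
		if (got > 0)
			unpin_user_pages(pgs, got);
		kvfree(pgs);
		return got < 0 ? got : -EFAULT;
	}
	*pgs_out = pgs;
	return 0;
}

/* Mirrors the release path seen in xdp_umem_unpin_pages() above. */
static void demo_unpin_pages(struct page **pgs, u32 npgs)
{
	unpin_user_pages_dirty_lock(pgs, npgs, true);
	kvfree(pgs);
}
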
xsk_buff_pool.c
56 struct xdp_umem *umem) in xp_create_and_assign_umem() argument
58 bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; in xp_create_and_assign_umem()
63 entries = unaligned ? umem->chunks : 0; in xp_create_and_assign_umem()
68 pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL); in xp_create_and_assign_umem()
76 pool->chunk_mask = ~((u64)umem->chunk_size - 1); in xp_create_and_assign_umem()
77 pool->addrs_cnt = umem->size; in xp_create_and_assign_umem()
78 pool->heads_cnt = umem->chunks; in xp_create_and_assign_umem()
79 pool->free_heads_cnt = umem->chunks; in xp_create_and_assign_umem()
80 pool->headroom = umem->headroom; in xp_create_and_assign_umem()
81 pool->chunk_size = umem->chunk_size; in xp_create_and_assign_umem()
[all …]
xsk_diag.c
50 struct xdp_umem *umem = xs->umem; in xsk_diag_put_umem() local
54 if (!umem) in xsk_diag_put_umem()
57 du.id = umem->id; in xsk_diag_put_umem()
58 du.size = umem->size; in xsk_diag_put_umem()
59 du.num_pages = umem->npgs; in xsk_diag_put_umem()
60 du.chunk_size = umem->chunk_size; in xsk_diag_put_umem()
61 du.headroom = umem->headroom; in xsk_diag_put_umem()
65 if (umem->zc) in xsk_diag_put_umem()
67 du.refs = refcount_read(&umem->users); in xsk_diag_put_umem()
/linux-6.1.9/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c
40 struct nvkm_umem *umem; in nvkm_umem_search() local
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
47 if (umem->object.object == handle) { in nvkm_umem_search()
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
55 umem = nvkm_umem(object); in nvkm_umem_search()
56 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
65 struct nvkm_umem *umem = nvkm_umem(object); in nvkm_umem_unmap() local
67 if (!umem->map) in nvkm_umem_unmap()
70 if (umem->io) { in nvkm_umem_unmap()
71 if (!IS_ERR(umem->bar)) { in nvkm_umem_unmap()
[all …]
/linux-6.1.9/drivers/infiniband/core/
umem.c
48 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) in __ib_umem_release() argument
50 bool make_dirty = umem->writable && dirty; in __ib_umem_release()
55 ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt, in __ib_umem_release()
58 for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) in __ib_umem_release()
62 sg_free_append_table(&umem->sgt_append); in __ib_umem_release()
79 unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, in ib_umem_find_best_pgsz() argument
88 if (umem->is_odp) { in ib_umem_find_best_pgsz()
89 unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift); in ib_umem_find_best_pgsz()
103 umem->iova = va = virt; in ib_umem_find_best_pgsz()
110 bits_per((umem->length - 1 + virt) ^ virt)); in ib_umem_find_best_pgsz()
[all …]
umem_odp.c
55 umem_odp->umem.is_odp = 1; in ib_init_umem_odp()
64 start = ALIGN_DOWN(umem_odp->umem.address, page_size); in ib_init_umem_odp()
65 if (check_add_overflow(umem_odp->umem.address, in ib_init_umem_odp()
66 (unsigned long)umem_odp->umem.length, in ib_init_umem_odp()
91 umem_odp->umem.owning_mm, in ib_init_umem_odp()
119 struct ib_umem *umem; in ib_umem_odp_alloc_implicit() local
129 umem = &umem_odp->umem; in ib_umem_odp_alloc_implicit()
130 umem->ibdev = device; in ib_umem_odp_alloc_implicit()
131 umem->writable = ib_access_writable(access); in ib_umem_odp_alloc_implicit()
132 umem->owning_mm = current->mm; in ib_umem_odp_alloc_implicit()
[all …]
umem_dmabuf.c
35 start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE); in ib_umem_dmabuf_map_pages()
36 end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length, in ib_umem_dmabuf_map_pages()
61 umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg; in ib_umem_dmabuf_map_pages()
62 umem_dmabuf->umem.sgt_append.sgt.nents = nmap; in ib_umem_dmabuf_map_pages()
119 struct ib_umem *umem; in ib_umem_dmabuf_get() local
142 umem = &umem_dmabuf->umem; in ib_umem_dmabuf_get()
143 umem->ibdev = device; in ib_umem_dmabuf_get()
144 umem->length = size; in ib_umem_dmabuf_get()
145 umem->address = offset; in ib_umem_dmabuf_get()
146 umem->writable = ib_access_writable(access); in ib_umem_dmabuf_get()
[all …]
/linux-6.1.9/include/rdma/
ib_umem.h
33 struct ib_umem umem; member
44 static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem) in to_ib_umem_dmabuf() argument
46 return container_of(umem, struct ib_umem_dmabuf, umem); in to_ib_umem_dmabuf()
50 static inline int ib_umem_offset(struct ib_umem *umem) in ib_umem_offset() argument
52 return umem->address & ~PAGE_MASK; in ib_umem_offset()
55 static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem, in ib_umem_dma_offset() argument
58 return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) & in ib_umem_dma_offset()
62 static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem, in ib_umem_num_dma_blocks() argument
65 return (size_t)((ALIGN(umem->iova + umem->length, pgsz) - in ib_umem_num_dma_blocks()
66 ALIGN_DOWN(umem->iova, pgsz))) / in ib_umem_num_dma_blocks()
[all …]
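
Note: both inline helpers above reduce to plain address arithmetic. A toy userspace restatement in C (values chosen only for illustration) of ib_umem_offset() and ib_umem_num_dma_blocks():

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel's PAGE_SIZE/ALIGN macros. */
#define PAGE_SIZE        4096ULL
#define PAGE_MASK        (~(PAGE_SIZE - 1))
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	/* A 10000-byte region starting 100 bytes into a page. */
	uint64_t va = 0x10000 + 100, length = 10000, pgsz = PAGE_SIZE;

	/* ib_umem_offset(): offset of the start address within its page. */
	uint64_t offset = va & ~PAGE_MASK;                        /* -> 100 */

	/* ib_umem_num_dma_blocks(): pgsz-aligned span divided by pgsz. */
	uint64_t blocks = (ALIGN(va + length, pgsz) -
	                   ALIGN_DOWN(va, pgsz)) / pgsz;          /* -> 3 */

	printf("offset=%llu blocks=%llu\n",
	       (unsigned long long)offset, (unsigned long long)blocks);
	return 0;
}
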
ib_umem_odp.h
13 struct ib_umem umem; member
47 static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) in to_ib_umem_odp() argument
49 return container_of(umem, struct ib_umem_odp, umem); in to_ib_umem_odp()
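
Note: to_ib_umem_odp() above (and to_ib_umem_dmabuf() earlier) is the standard embedded-struct/container_of() idiom: the wrapper embeds a struct ib_umem by value, so a pointer to the member can be converted back to the wrapper without storing a back-pointer. A self-contained userspace toy with hypothetical struct names:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins for ib_umem / ib_umem_odp. */
struct toy_umem     { unsigned long address; size_t length; };
struct toy_umem_odp { int page_shift; struct toy_umem umem; };

static struct toy_umem_odp *to_toy_umem_odp(struct toy_umem *umem)
{
	return container_of(umem, struct toy_umem_odp, umem);
}

int main(void)
{
	struct toy_umem_odp odp = { .page_shift = 12 };
	struct toy_umem *base = &odp.umem;	/* code that only sees the base... */

	/* ...can still recover the enclosing object. */
	printf("page_shift=%d\n", to_toy_umem_odp(base)->page_shift);
	return 0;
}
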
/linux-6.1.9/tools/testing/selftests/bpf/
xsk.c
72 struct xsk_umem *umem; member
113 int xsk_umem__fd(const struct xsk_umem *umem) in xsk_umem__fd() argument
115 return umem ? umem->fd : -EINVAL; in xsk_umem__fd()
225 static int xsk_create_umem_rings(struct xsk_umem *umem, int fd, in xsk_create_umem_rings() argument
234 &umem->config.fill_size, in xsk_create_umem_rings()
235 sizeof(umem->config.fill_size)); in xsk_create_umem_rings()
240 &umem->config.comp_size, in xsk_create_umem_rings()
241 sizeof(umem->config.comp_size)); in xsk_create_umem_rings()
249 map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), in xsk_create_umem_rings()
255 fill->mask = umem->config.fill_size - 1; in xsk_create_umem_rings()
[all …]
xskxceiver.c
262 return !!ifobj->umem->umem; in is_umem_valid()
272 static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size) in xsk_configure_umem() argument
277 .frame_size = umem->frame_size, in xsk_configure_umem()
278 .frame_headroom = umem->frame_headroom, in xsk_configure_umem()
283 if (umem->unaligned_mode) in xsk_configure_umem()
286 ret = xsk_umem__create(&umem->umem, buffer, size, in xsk_configure_umem()
287 &umem->fq, &umem->cq, &cfg); in xsk_configure_umem()
291 umem->buffer = buffer; in xsk_configure_umem()
315 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, in __xsk_configure_socket() argument
322 xsk->umem = umem; in __xsk_configure_socket()
[all …]
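
Note: xsk_configure_umem() above wraps xsk_umem__create() from the selftests' local copy of xsk.h. A hedged userspace sketch of that call, assuming the same libxdp-style API and its default ring/frame constants (demo_create_umem() is hypothetical):

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include "xsk.h"	/* tools/testing/selftests/bpf/xsk.h (or libxdp's) */

#define NUM_FRAMES 4096

static int demo_create_umem(struct xsk_umem **umem, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp, void **buffer_out)
{
	struct xsk_umem_config cfg = {
		.fill_size      = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size      = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size     = XSK_UMEM__DEFAULT_FRAME_SIZE,
		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
	};
	size_t size = (size_t)NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *buffer;
	int ret;

	/* The UMEM area must be page aligned. */
	if (posix_memalign(&buffer, getpagesize(), size))
		return -ENOMEM;

	ret = xsk_umem__create(umem, buffer, size, fill, comp, &cfg);
	if (ret) {		/* negative errno on failure */
		free(buffer);
		return ret;
	}
	*buffer_out = buffer;
	return 0;
}
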
/linux-6.1.9/drivers/infiniband/sw/siw/
siw_mem.c
69 void siw_umem_release(struct siw_umem *umem, bool dirty) in siw_umem_release() argument
71 struct mm_struct *mm_s = umem->owning_mm; in siw_umem_release()
72 int i, num_pages = umem->num_pages; in siw_umem_release()
77 siw_free_plist(&umem->page_chunk[i], to_free, in siw_umem_release()
78 umem->writable && dirty); in siw_umem_release()
79 kfree(umem->page_chunk[i].plist); in siw_umem_release()
82 atomic64_sub(umem->num_pages, &mm_s->pinned_vm); in siw_umem_release()
85 kfree(umem->page_chunk); in siw_umem_release()
86 kfree(umem); in siw_umem_release()
148 siw_umem_release(mem->umem, true); in siw_free_mem()
[all …]
siw_mem.h
10 void siw_umem_release(struct siw_umem *umem, bool dirty);
58 static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) in siw_get_upage() argument
60 unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, in siw_get_upage()
64 if (likely(page_idx < umem->num_pages)) in siw_get_upage()
65 return umem->page_chunk[chunk_idx].plist[page_in_chunk]; in siw_get_upage()
/linux-6.1.9/drivers/infiniband/hw/mlx4/
mr.c
77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
183 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
203 for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { in mlx4_ib_umem_write_mtt()
257 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, in mlx4_ib_umem_calc_optimal_mtt_size() argument
274 *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in mlx4_ib_umem_calc_optimal_mtt_size()
276 for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { in mlx4_ib_umem_calc_optimal_mtt_size()
420 mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags); in mlx4_ib_reg_user_mr()
421 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
422 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
426 shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n); in mlx4_ib_reg_user_mr()
[all …]
doorbell.c
40 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx4_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx4_ib_db_map_user()
95 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
srq.c
117 srq->umem = in mlx4_ib_create_srq()
119 if (IS_ERR(srq->umem)) in mlx4_ib_create_srq()
120 return PTR_ERR(srq->umem); in mlx4_ib_create_srq()
123 dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE), in mlx4_ib_create_srq()
128 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); in mlx4_ib_create_srq()
213 if (!srq->umem) in mlx4_ib_create_srq()
215 ib_umem_release(srq->umem); in mlx4_ib_create_srq()
289 ib_umem_release(msrq->umem); in mlx4_ib_destroy_srq()
/linux-6.1.9/drivers/infiniband/hw/mlx5/
doorbell.c
42 struct ib_umem *umem; member
69 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx5_ib_db_map_user()
71 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
72 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
82 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx5_ib_db_map_user()
100 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
mr.c
55 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
849 mr->umem = NULL; in mlx5_ib_get_dma_mr()
904 static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem, in mlx5_umem_dmabuf_default_pgsz() argument
911 umem->iova = iova; in mlx5_umem_dmabuf_default_pgsz()
916 struct ib_umem *umem, u64 iova, in alloc_cacheable_mr() argument
924 if (umem->is_dmabuf) in alloc_cacheable_mr()
925 page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova); in alloc_cacheable_mr()
927 page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, in alloc_cacheable_mr()
932 dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size))); in alloc_cacheable_mr()
939 mlx5_umem_needs_ats(dev, umem, access_flags)) { in alloc_cacheable_mr()
[all …]
mem.c
41 void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas, in mlx5_ib_populate_pas() argument
46 rdma_umem_for_each_dma_block (umem, &biter, page_size) { in mlx5_ib_populate_pas()
59 struct ib_umem *umem, unsigned long pgsz_bitmap, in __mlx5_umem_find_best_quantized_pgoff() argument
67 page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask); in __mlx5_umem_find_best_quantized_pgoff()
78 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
81 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
/linux-6.1.9/lib/
test_user_copy.c
47 static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size) in test_check_nonzero_user() argument
65 umem += start; in test_check_nonzero_user()
87 ret |= test(copy_to_user(umem, kmem, size), in test_check_nonzero_user()
93 int retval = check_zeroed_user(umem + start, len); in test_check_nonzero_user()
105 static int test_copy_struct_from_user(char *kmem, char __user *umem, in test_copy_struct_from_user() argument
124 ret |= test(copy_to_user(umem, umem_src, size), in test_copy_struct_from_user()
134 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), in test_copy_struct_from_user()
147 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), in test_copy_struct_from_user()
157 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG, in test_copy_struct_from_user()
165 ret |= test(clear_user(umem + ksize, usize - ksize), in test_copy_struct_from_user()
[all …]
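
Note: test_copy_struct_from_user() above exercises the extensible-struct helper used for versioned UAPI structs. A minimal kernel-side sketch of the usual call pattern (demo_args/demo_handle are hypothetical; only copy_struct_from_user() is the real API):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>	/* copy_struct_from_user() */

/* Hypothetical versioned ABI struct: new fields are only ever appended. */
struct demo_args {
	__u64 flags;
	__u64 addr;
	__u64 len;
};

static int demo_handle(void __user *uarg, size_t usize)
{
	struct demo_args args;
	int ret;

	/*
	 * Copies min(sizeof(args), usize) bytes, zero-fills the remainder,
	 * and returns -E2BIG if userspace passed trailing non-zero bytes the
	 * kernel does not understand -- the cases the selftest checks above.
	 */
	ret = copy_struct_from_user(&args, sizeof(args), uarg, usize);
	if (ret)
		return ret;

	if (args.flags)		/* no flags defined in this toy ABI */
		return -EINVAL;

	return 0;
}
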
/linux-6.1.9/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
117 struct ib_umem *umem; in pvrdma_reg_user_mr() local
129 umem = ib_umem_get(pd->device, start, length, access_flags); in pvrdma_reg_user_mr()
130 if (IS_ERR(umem)) { in pvrdma_reg_user_mr()
133 return ERR_CAST(umem); in pvrdma_reg_user_mr()
136 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in pvrdma_reg_user_mr()
152 mr->umem = umem; in pvrdma_reg_user_mr()
161 ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); in pvrdma_reg_user_mr()
190 ib_umem_release(umem); in pvrdma_reg_user_mr()
257 mr->umem = NULL; in pvrdma_alloc_mr()
294 ib_umem_release(mr->umem); in pvrdma_dereg_mr()
pvrdma_srq.c
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
150 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
151 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
155 srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE); in pvrdma_create_srq()
171 pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); in pvrdma_create_srq()
209 ib_umem_release(srq->umem); in pvrdma_create_srq()
229 ib_umem_release(srq->umem); in pvrdma_free_srq()
/linux-6.1.9/drivers/vdpa/vdpa_user/
vduse_dev.c
107 struct vduse_umem *umem; member
943 if (!dev->umem) in vduse_dev_dereg_umem()
947 if (dev->umem->iova != iova || size != dev->domain->bounce_size) in vduse_dev_dereg_umem()
951 unpin_user_pages_dirty_lock(dev->umem->pages, in vduse_dev_dereg_umem()
952 dev->umem->npages, true); in vduse_dev_dereg_umem()
953 atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm); in vduse_dev_dereg_umem()
954 mmdrop(dev->umem->mm); in vduse_dev_dereg_umem()
955 vfree(dev->umem->pages); in vduse_dev_dereg_umem()
956 kfree(dev->umem); in vduse_dev_dereg_umem()
957 dev->umem = NULL; in vduse_dev_dereg_umem()
[all …]
/linux-6.1.9/drivers/infiniband/hw/hns/
hns_roce_db.c
32 page->umem = ib_umem_get(context->ibucontext.device, page_addr, in hns_roce_db_map_user()
34 if (IS_ERR(page->umem)) { in hns_roce_db_map_user()
35 ret = PTR_ERR(page->umem); in hns_roce_db_map_user()
44 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
45 db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
63 ib_umem_release(db->u.user_page->umem); in hns_roce_db_unmap_user()
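
Note: the mlx4, mlx5 and hns doorbell paths above share one pattern: pin the single user page that contains the doorbell and derive its DMA address from the mapped scatterlist. A hedged sketch with a hypothetical demo_user_db type (the ib_umem_get()/sg_dma_address() calls are the real API):

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct demo_user_db {
	struct ib_umem *umem;
	dma_addr_t dma;
};

static int demo_db_map_user(struct ib_device *ibdev, unsigned long virt,
			    struct demo_user_db *db)
{
	struct ib_umem *umem;

	/* Pin and DMA-map the one page containing 'virt'. */
	umem = ib_umem_get(ibdev, virt & PAGE_MASK, PAGE_SIZE, 0);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	db->umem = umem;
	/* DMA address of that page plus the doorbell's offset within it. */
	db->dma = sg_dma_address(umem->sgt_append.sgt.sgl) + (virt & ~PAGE_MASK);
	return 0;
}

static void demo_db_unmap_user(struct demo_user_db *db)
{
	ib_umem_release(db->umem);
}
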
/linux-6.1.9/drivers/infiniband/sw/rxe/
rxe_mr.c
121 struct ib_umem *umem; in rxe_mr_init_user() local
127 umem = ib_umem_get(&rxe->ib_dev, start, length, access); in rxe_mr_init_user()
128 if (IS_ERR(umem)) { in rxe_mr_init_user()
130 __func__, (int)PTR_ERR(umem)); in rxe_mr_init_user()
131 err = PTR_ERR(umem); in rxe_mr_init_user()
135 num_buf = ib_umem_num_pages(umem); in rxe_mr_init_user()
154 for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) { in rxe_mr_init_user()
176 mr->umem = umem; in rxe_mr_init_user()
178 mr->offset = ib_umem_offset(umem); in rxe_mr_init_user()
185 ib_umem_release(umem); in rxe_mr_init_user()
[all …]
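
Note: rxe_mr_init_user(), pvrdma_reg_user_mr() and the mlx4/mlx5 paths above all follow the same reg_user_mr shape. A condensed, hedged sketch with a hypothetical demo_mr wrapper (only the ib_umem_* calls are the real core API; real drivers program hardware from the SG table where indicated):

#include <linux/err.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct demo_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
};

static int demo_reg_user_mr(struct demo_mr *mr, struct ib_pd *pd,
			    u64 start, u64 length, int access_flags)
{
	struct ib_umem *umem;
	size_t nblocks;

	/* Pin + DMA-map the user range; same call as rxe/pvrdma/mlx4 above. */
	umem = ib_umem_get(pd->device, start, length, access_flags);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	/* Size the driver's page tables from the DMA-block count. */
	nblocks = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
	if (!nblocks) {
		ib_umem_release(umem);
		return -EINVAL;
	}

	/* ... hardware-specific setup would walk umem->sgt_append.sgt here ... */

	mr->umem = umem;
	mr->ibmr.iova = start;
	mr->ibmr.length = length;
	return 0;
}

static void demo_dereg_mr(struct demo_mr *mr)
{
	ib_umem_release(mr->umem);	/* unmaps, unpins and frees the pages */
}
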
