Searched refs:npages (Results 1 – 25 of 290) sorted by relevance

/linux-6.1.9/tools/testing/selftests/vm/
hmm-tests.c
176 unsigned long npages) in hmm_dmirror_cmd() argument
184 cmd.npages = npages; in hmm_dmirror_cmd()
265 unsigned long npages) in hmm_migrate_sys_to_dev() argument
267 return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages); in hmm_migrate_sys_to_dev()
272 unsigned long npages) in hmm_migrate_dev_to_sys() argument
274 return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages); in hmm_migrate_dev_to_sys()
290 unsigned long npages; in TEST_F() local
297 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
298 ASSERT_NE(npages, 0); in TEST_F()
299 size = npages << self->page_shift; in TEST_F()
[all …]
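
The hmm-tests.c hits above all rely on the same length-to-page-count conversion: round the buffer length up to a whole number of pages (ALIGN(size, page_size) >> page_shift), then recover the aligned byte size as npages << page_shift. A minimal standalone userspace sketch of that idiom follows; it assumes a power-of-two page size from sysconf() and the GCC/Clang __builtin_ctzl builtin, and the buffer length is made up for illustration rather than taken from the selftest.

/*
 * Illustrative only: the size <-> npages round-trip used in the
 * selftest hits above, written as a standalone program.
 */
#include <assert.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	unsigned int page_shift = (unsigned int)__builtin_ctzl(page_size); /* log2 of a power-of-two page size */
	size_t length = (1 << 20) + 123;	/* hypothetical, deliberately unaligned buffer length */

	/* Equivalent of ALIGN(length, page_size) >> page_shift. */
	unsigned long npages = ((length + page_size - 1) & ~(page_size - 1)) >> page_shift;
	size_t size = (size_t)npages << page_shift;	/* bytes spanned by whole pages */

	assert(npages != 0);
	printf("length=%zu npages=%lu size=%zu\n", length, npages, size);
	return 0;
}
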
/linux-6.1.9/drivers/gpu/drm/i915/selftests/
scatterlist.c
53 unsigned int npages = npages_fn(n, pt->st.nents, rnd); in expect_pfn_sg() local
61 if (sg->length != npages * PAGE_SIZE) { in expect_pfn_sg()
63 __func__, who, npages * PAGE_SIZE, sg->length); in expect_pfn_sg()
70 pfn += npages; in expect_pfn_sg()
209 unsigned long npages) in page_contiguous() argument
211 return first + npages == last; in page_contiguous()
238 unsigned long npages = npages_fn(n, count, rnd); in alloc_table() local
242 pfn_to_page(pfn + npages), in alloc_table()
243 npages)) { in alloc_table()
250 sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0); in alloc_table()
[all …]
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c
52 s32 npages; member
175 s32 *npages, int boot) in mlx5_cmd_query_pages() argument
191 *npages = MLX5_GET(query_pages_out, out, num_pages); in mlx5_cmd_query_pages()
328 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in give_pages() argument
340 inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); in give_pages()
348 for (i = 0; i < npages; i++) { in give_pages()
355 dev->priv.fw_pages_alloc_failed += (npages - i); in give_pages()
367 MLX5_SET(manage_pages_in, in, input_num_entries, npages); in give_pages()
382 func_id, npages, err); in give_pages()
386 dev->priv.fw_pages += npages; in give_pages()
[all …]
/linux-6.1.9/arch/sparc/kernel/
iommu.c
158 unsigned long npages) in alloc_npages() argument
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages()
204 int npages, nid; in dma_4u_alloc_coherent() local
233 npages = size >> IO_PAGE_SHIFT; in dma_4u_alloc_coherent()
235 while (npages--) { in dma_4u_alloc_coherent()
251 unsigned long order, npages; in dma_4u_free_coherent() local
253 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; in dma_4u_free_coherent()
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); in dma_4u_free_coherent()
271 unsigned long flags, npages, oaddr; in dma_4u_map_page() local
283 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); in dma_4u_map_page()
[all …]
pci_sun4v.c
60 unsigned long npages; /* Number of pages in list. */ member
74 p->npages = 0; in iommu_batch_start()
91 unsigned long npages = p->npages; in iommu_batch_flush() local
100 while (npages != 0) { in iommu_batch_flush()
104 npages, in iommu_batch_flush()
112 npages, prot, __pa(pglist), in iommu_batch_flush()
117 index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), in iommu_batch_flush()
135 npages -= num; in iommu_batch_flush()
140 p->npages = 0; in iommu_batch_flush()
149 if (p->entry + p->npages == entry) in iommu_batch_new_entry()
[all …]
/linux-6.1.9/drivers/infiniband/hw/hfi1/
user_pages.c
30 u32 nlocked, u32 npages) in hfi1_can_pin_pages() argument
55 if (pinned + npages >= ulimit && !can_lock) in hfi1_can_pin_pages()
58 return ((nlocked + npages) <= size) || can_lock; in hfi1_can_pin_pages()
61 int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages, in hfi1_acquire_user_pages() argument
67 ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages); in hfi1_acquire_user_pages()
77 size_t npages, bool dirty) in hfi1_release_user_pages() argument
79 unpin_user_pages_dirty_lock(p, npages, dirty); in hfi1_release_user_pages()
82 atomic64_sub(npages, &mm->pinned_vm); in hfi1_release_user_pages()
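
The user_pages.c hits show hfi1 checking a pinned-page budget before calling pin_user_pages_fast() and accounting the pages back on release. Below is a rough userspace sketch of that kind of check, under the assumption that the budget is the RLIMIT_MEMLOCK soft limit expressed in pages and that a privileged caller (can_lock) may bypass it; the function name and policy details are illustrative, not the driver's exact logic.

/*
 * Hypothetical helper: would pinning `npages` more pages exceed the
 * RLIMIT_MEMLOCK budget already consumed by `pinned` pages?
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

static bool can_pin_pages(unsigned long pinned, unsigned long npages, bool can_lock)
{
	struct rlimit rl;
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);

	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
		return false;

	/* The limit is reported in bytes; convert to pages before comparing. */
	return (pinned + npages <= rl.rlim_cur / page_size) || can_lock;
}

int main(void)
{
	printf("can pin 16 more pages: %d\n", can_pin_pages(0, 16, false));
	return 0;
}
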
user_exp_rcv.c
16 static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
20 u16 pageidx, unsigned int npages);
137 unsigned int npages, in unpin_rcv_pages() argument
146 node->npages * PAGE_SIZE, DMA_FROM_DEVICE); in unpin_rcv_pages()
153 hfi1_release_user_pages(mm, pages, npages, mapped); in unpin_rcv_pages()
154 fd->tid_n_pinned -= npages; in unpin_rcv_pages()
163 unsigned int npages; in pin_rcv_pages() local
169 npages = num_user_pages(vaddr, tidbuf->length); in pin_rcv_pages()
170 if (!npages) in pin_rcv_pages()
173 if (npages > fd->uctxt->expected_count) { in pin_rcv_pages()
[all …]
/linux-6.1.9/drivers/gpu/drm/i915/gem/selftests/
mock_dmabuf.c
21 err = sg_alloc_table(st, mock->npages, GFP_KERNEL); in mock_map_dma_buf()
26 for (i = 0; i < mock->npages; i++) { in mock_map_dma_buf()
58 for (i = 0; i < mock->npages; i++) in mock_dmabuf_release()
69 vaddr = vm_map_ram(mock->pages, mock->npages, 0); in mock_dmabuf_vmap()
81 vm_unmap_ram(map->vaddr, mock->npages); in mock_dmabuf_vunmap()
98 static struct dma_buf *mock_dmabuf(int npages) in mock_dmabuf() argument
105 mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *), in mock_dmabuf()
110 mock->npages = npages; in mock_dmabuf()
111 for (i = 0; i < npages; i++) { in mock_dmabuf()
118 exp_info.size = npages * PAGE_SIZE; in mock_dmabuf()
/linux-6.1.9/drivers/gpu/drm/amd/amdkfd/
kfd_migrate.c
50 svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, in svm_migrate_gart_map() argument
66 num_bytes = npages * 8; in svm_migrate_gart_map()
91 amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr); in svm_migrate_gart_map()
131 uint64_t *vram, uint64_t npages, in svm_migrate_copy_memory_gart() argument
144 while (npages) { in svm_migrate_copy_memory_gart()
145 size = min(GTT_MAX_PAGES, npages); in svm_migrate_copy_memory_gart()
170 npages -= size; in svm_migrate_copy_memory_gart()
171 if (npages) { in svm_migrate_copy_memory_gart()
274 for (i = 0; i < migrate->npages; i++) { in svm_migrate_successful_pages()
287 for (i = 0; i < migrate->npages; i++) { in svm_migrate_unsuccessful_pages()
[all …]
/linux-6.1.9/arch/x86/mm/
mem_encrypt_amd.c
61 unsigned long npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; in snp_memcpy() local
68 early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages); in snp_memcpy()
73 early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages); in snp_memcpy()
291 static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) in enc_dec_hypercall() argument
294 unsigned long sz = npages << PAGE_SHIFT; in enc_dec_hypercall()
322 static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc) in amd_enc_status_change_prepare() argument
329 snp_set_memory_shared(vaddr, npages); in amd_enc_status_change_prepare()
333 static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc) in amd_enc_status_change_finish() argument
340 snp_set_memory_private(vaddr, npages); in amd_enc_status_change_finish()
343 enc_dec_hypercall(vaddr, npages, enc); in amd_enc_status_change_finish()
[all …]
cpu_entry_area.c
63 unsigned int npages; in percpu_setup_debug_store() local
70 npages = sizeof(struct debug_store) / PAGE_SIZE; in percpu_setup_debug_store()
72 cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages, in percpu_setup_debug_store()
80 npages = sizeof(struct debug_store_buffers) / PAGE_SIZE; in percpu_setup_debug_store()
81 for (; npages; npages--, cea += PAGE_SIZE) in percpu_setup_debug_store()
89 npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
91 estacks->name## _stack, npages, PAGE_KERNEL); \
98 unsigned int npages; in percpu_setup_exception_stacks() local
/linux-6.1.9/drivers/vfio/
iova_bitmap.c
44 unsigned long npages; member
163 unsigned long npages; in iova_bitmap_get() local
173 npages = DIV_ROUND_UP((bitmap->mapped_total_index - in iova_bitmap_get()
181 npages = min(npages, PAGE_SIZE / sizeof(struct page *)); in iova_bitmap_get()
189 ret = pin_user_pages_fast((unsigned long)addr, npages, in iova_bitmap_get()
194 mapped->npages = (unsigned long)ret; in iova_bitmap_get()
216 if (mapped->npages) { in iova_bitmap_put()
217 unpin_user_pages(mapped->pages, mapped->npages); in iova_bitmap_put()
218 mapped->npages = 0; in iova_bitmap_put()
300 bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff; in iova_bitmap_mapped_remaining()
/linux-6.1.9/arch/powerpc/kernel/
iommu.c
205 unsigned long npages, in iommu_range_alloc() argument
212 int largealloc = npages > 15; in iommu_range_alloc()
224 if (unlikely(npages == 0)) { in iommu_range_alloc()
278 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
314 end = n + npages; in iommu_range_alloc()
336 void *page, unsigned int npages, in iommu_alloc() argument
345 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
354 build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
364 __iommu_free(tbl, ret, npages); in iommu_alloc()
379 unsigned int npages) in iommu_free_check() argument
[all …]
/linux-6.1.9/mm/
migrate_device.c
28 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_skip()
29 migrate->src[migrate->npages++] = 0; in migrate_vma_collect_skip()
48 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; in migrate_vma_collect_hole()
49 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_hole()
50 migrate->npages++; in migrate_vma_collect_hole()
272 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_pmd()
273 migrate->src[migrate->npages++] = mpfn; in migrate_vma_collect_pmd()
317 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); in migrate_vma_collect()
364 unsigned long npages, in migrate_device_unmap() argument
373 for (i = 0; i < npages; i++) { in migrate_device_unmap()
[all …]
hmm.c
122 const unsigned long hmm_pfns[], unsigned long npages, in hmm_range_need_fault() argument
138 for (i = 0; i < npages; ++i) { in hmm_range_need_fault()
153 unsigned long i, npages; in hmm_vma_walk_hole() local
157 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_walk_hole()
160 hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0); in hmm_vma_walk_hole()
193 unsigned long pfn, npages, i; in hmm_vma_handle_pmd() local
197 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_handle_pmd()
200 hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags); in hmm_vma_handle_pmd()
329 unsigned long npages = (end - start) >> PAGE_SHIFT; in hmm_vma_walk_pmd() local
340 if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) { in hmm_vma_walk_pmd()
[all …]
/linux-6.1.9/drivers/fpga/
dfl-afu-dma-region.c
37 int npages = region->length >> PAGE_SHIFT; in afu_dma_pin_pages() local
41 ret = account_locked_vm(current->mm, npages, true); in afu_dma_pin_pages()
45 region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL); in afu_dma_pin_pages()
51 pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE, in afu_dma_pin_pages()
56 } else if (pinned != npages) { in afu_dma_pin_pages()
70 account_locked_vm(current->mm, npages, false); in afu_dma_pin_pages()
85 long npages = region->length >> PAGE_SHIFT; in afu_dma_unpin_pages() local
88 unpin_user_pages(region->pages, npages); in afu_dma_unpin_pages()
90 account_locked_vm(current->mm, npages, false); in afu_dma_unpin_pages()
92 dev_dbg(dev, "%ld pages unpinned\n", npages); in afu_dma_unpin_pages()
[all …]
/linux-6.1.9/drivers/infiniband/hw/mthca/
mthca_memfree.c
69 dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
81 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, in mthca_alloc_icm() argument
157 while (npages > 0) { in mthca_alloc_icm()
165 chunk->npages = 0; in mthca_alloc_icm()
170 while (1 << cur_order > npages) in mthca_alloc_icm()
175 &chunk->mem[chunk->npages], in mthca_alloc_icm()
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], in mthca_alloc_icm()
182 ++chunk->npages; in mthca_alloc_icm()
[all …]
mthca_allocator.c
195 int npages, shift; in mthca_buf_alloc() local
202 npages = 1; in mthca_buf_alloc()
214 npages *= 2; in mthca_buf_alloc()
217 dma_list = kmalloc_array(npages, sizeof(*dma_list), in mthca_buf_alloc()
222 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
226 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_buf_alloc()
229 dma_list = kmalloc_array(npages, sizeof(*dma_list), in mthca_buf_alloc()
234 buf->page_list = kmalloc_array(npages, in mthca_buf_alloc()
240 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
243 for (i = 0; i < npages; ++i) { in mthca_buf_alloc()
[all …]
/linux-6.1.9/drivers/infiniband/core/
ib_core_uverbs.c
141 pgoff, entry->npages); in rdma_user_mmap_entry_get_pgoff()
171 if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) { in rdma_user_mmap_entry_get()
191 for (i = 0; i < entry->npages; i++) in rdma_user_mmap_entry_free()
196 entry->start_pgoff, entry->npages); in rdma_user_mmap_entry_free()
269 u32 xa_first, xa_last, npages; in rdma_user_mmap_entry_insert_range() local
290 npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE); in rdma_user_mmap_entry_insert_range()
291 entry->npages = npages; in rdma_user_mmap_entry_insert_range()
301 if (check_add_overflow(xa_first, npages, &xa_last)) in rdma_user_mmap_entry_insert_range()
328 entry->start_pgoff, npages); in rdma_user_mmap_entry_insert_range()
umem.c
157 unsigned long npages; in ib_umem_get() local
196 npages = ib_umem_num_pages(umem); in ib_umem_get()
197 if (npages == 0 || npages > UINT_MAX) { in ib_umem_get()
204 new_pinned = atomic64_add_return(npages, &mm->pinned_vm); in ib_umem_get()
206 atomic64_sub(npages, &mm->pinned_vm); in ib_umem_get()
216 while (npages) { in ib_umem_get()
219 min_t(unsigned long, npages, in ib_umem_get()
229 npages -= pinned; in ib_umem_get()
233 npages, GFP_KERNEL); in ib_umem_get()
/linux-6.1.9/arch/x86/include/asm/
sev.h
190 unsigned int npages);
192 unsigned int npages);
194 void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
195 void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
210 early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned int npages) { } in early_snp_set_memory_private() argument
212 early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned int npages) { } in early_snp_set_memory_shared() argument
214 static inline void snp_set_memory_shared(unsigned long vaddr, unsigned int npages) { } in snp_set_memory_shared() argument
215 static inline void snp_set_memory_private(unsigned long vaddr, unsigned int npages) { } in snp_set_memory_private() argument
/linux-6.1.9/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_misc.c
53 u64 npages, bool alloc_pages) in pvrdma_page_dir_init() argument
57 if (npages > PVRDMA_PAGE_DIR_MAX_PAGES) in pvrdma_page_dir_init()
67 pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1; in pvrdma_page_dir_init()
81 pdir->npages = npages; in pvrdma_page_dir_init()
84 pdir->pages = kcalloc(npages, sizeof(*pdir->pages), in pvrdma_page_dir_init()
89 for (i = 0; i < pdir->npages; i++) { in pvrdma_page_dir_init()
127 for (i = 0; i < pdir->npages && pdir->pages[i]; i++) { in pvrdma_page_dir_cleanup_pages()
173 if (idx >= pdir->npages) in pvrdma_page_dir_insert_dma()
189 if (offset >= pdir->npages) in pvrdma_page_dir_insert_umem()
212 if (num_pages > pdir->npages) in pvrdma_page_dir_insert_page_list()
/linux-6.1.9/tools/testing/selftests/kvm/
memslot_perf_test.c
94 uint64_t npages; member
197 TEST_ASSERT(gpa < MEM_GPA + data->npages * 4096, in vm_gpa2hva()
210 slotpages = data->npages - slot * data->pages_per_slot; in vm_gpa2hva()
267 data->npages = mempages; in prepare_vm()
287 uint64_t npages; in prepare_vm() local
289 npages = data->pages_per_slot; in prepare_vm()
291 npages += rempages; in prepare_vm()
294 guest_addr, slot, npages, in prepare_vm()
296 guest_addr += npages * 4096; in prepare_vm()
301 uint64_t npages; in prepare_vm() local
[all …]
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx4/
icm.c
60 dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages, in mlx4_free_icm_pages()
63 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_coherent()
132 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, in mlx4_alloc_icm() argument
159 while (npages > 0) { in mlx4_alloc_icm()
179 while (1 << cur_order > npages) in mlx4_alloc_icm()
188 &chunk->buf[chunk->npages], in mlx4_alloc_icm()
191 ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages], in mlx4_alloc_icm()
202 ++chunk->npages; in mlx4_alloc_icm()
206 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { in mlx4_alloc_icm()
[all …]
/linux-6.1.9/arch/powerpc/sysdev/
dart_iommu.c
173 long npages, unsigned long uaddr, in dart_build() argument
181 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); in dart_build()
188 l = npages; in dart_build()
196 dart_cache_sync(orig_dp, npages); in dart_build()
200 while (npages--) in dart_build()
209 static void dart_free(struct iommu_table *tbl, long index, long npages) in dart_free() argument
212 long orig_npages = npages; in dart_free()
219 DBG("dart: free at: %lx, %lx\n", index, npages); in dart_free()
223 while (npages--) in dart_free()
