Searched refs: num_pages (Results 1 – 25 of 268), sorted by relevance

/linux-6.1.9/tools/testing/selftests/vm/
mremap_dontunmap.c
48 unsigned long num_pages = 1; in kernel_support_for_mremap_dontunmap() local
49 void *source_mapping = mmap(NULL, num_pages * page_size, PROT_NONE, in kernel_support_for_mremap_dontunmap()
56 mremap(source_mapping, num_pages * page_size, num_pages * page_size, in kernel_support_for_mremap_dontunmap()
61 BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1, in kernel_support_for_mremap_dontunmap()
65 BUG_ON(munmap(source_mapping, num_pages * page_size) == -1, in kernel_support_for_mremap_dontunmap()
81 unsigned long num_pages = size / page_size; in check_region_contains_byte() local
85 for (i = 0; i < num_pages; ++i) { in check_region_contains_byte()
100 unsigned long num_pages = 5; in mremap_dontunmap_simple() local
103 mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE, in mremap_dontunmap_simple()
107 memset(source_mapping, 'a', num_pages * page_size); in mremap_dontunmap_simple()
[all …]
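
The selftest above probes for MREMAP_DONTUNMAP support by moving a single PROT_NONE page. A minimal standalone sketch of the same probe, assuming Linux 5.7+ and defining the flag by hand in case older libc headers lack it:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4    /* UAPI value; missing from older libc headers */
#endif

int main(void)
{
    unsigned long num_pages = 1;
    size_t page_size = sysconf(_SC_PAGESIZE);

    void *src = mmap(NULL, num_pages * page_size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (src == MAP_FAILED)
        return 1;

    /* On success the source VMA stays mapped and a new one comes back. */
    void *dst = mremap(src, num_pages * page_size, num_pages * page_size,
                       MREMAP_DONTUNMAP | MREMAP_MAYMOVE, NULL);
    printf("MREMAP_DONTUNMAP %s\n",
           dst == MAP_FAILED ? "unsupported" : "supported");

    if (dst != MAP_FAILED)
        munmap(dst, num_pages * page_size);
    munmap(src, num_pages * page_size);
    return 0;
}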
/linux-6.1.9/drivers/infiniband/hw/qib/
qib_user_pages.c
40 static void __qib_release_user_pages(struct page **p, size_t num_pages, in __qib_release_user_pages() argument
43 unpin_user_pages_dirty_lock(p, num_pages, dirty); in __qib_release_user_pages()
94 int qib_get_user_pages(unsigned long start_page, size_t num_pages, in qib_get_user_pages() argument
102 locked = atomic64_add_return(num_pages, &current->mm->pinned_vm); in qib_get_user_pages()
110 for (got = 0; got < num_pages; got += ret) { in qib_get_user_pages()
112 num_pages - got, in qib_get_user_pages()
126 atomic64_sub(num_pages, &current->mm->pinned_vm); in qib_get_user_pages()
130 void qib_release_user_pages(struct page **p, size_t num_pages) in qib_release_user_pages() argument
132 __qib_release_user_pages(p, num_pages, 1); in qib_release_user_pages()
136 atomic64_sub(num_pages, &current->mm->pinned_vm); in qib_release_user_pages()
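
qib_get_user_pages() charges pinned_vm up front and then pins in a loop, because each call into the pinning API may return fewer pages than requested. A standalone sketch of that partial-progress loop; pin_some() is a stub standing in for pin_user_pages() and simulates short returns:

#include <stdio.h>

static long pin_some(long want, long *out, long base)
{
    long i, n = want > 2 ? 2 : want;    /* simulate a short return */

    for (i = 0; i < n; i++)
        out[i] = base + i;
    return n;
}

int main(void)
{
    long pages[7], got, ret, num_pages = 7;

    for (got = 0; got < num_pages; got += ret) {
        ret = pin_some(num_pages - got, pages + got, got);
        if (ret < 0)
            return 1;    /* real code unpins pages[0..got) and uncharges */
    }
    printf("pinned %ld pages in short batches\n", got);
    return 0;
}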
/linux-6.1.9/drivers/gpu/drm/ttm/
ttm_pool.c
166 unsigned int num_pages = last - first; in ttm_pool_apply_caching() local
168 if (!num_pages) in ttm_pool_apply_caching()
175 return set_pages_array_wc(first, num_pages); in ttm_pool_apply_caching()
177 return set_pages_array_uc(first, num_pages); in ttm_pool_apply_caching()
212 unsigned int num_pages) in ttm_pool_unmap() argument
218 dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT, in ttm_pool_unmap()
225 unsigned int i, num_pages = 1 << pt->order; in ttm_pool_type_give() local
227 for (i = 0; i < num_pages; ++i) { in ttm_pool_type_give()
316 unsigned int num_pages; in ttm_pool_shrink() local
327 num_pages = 1 << pt->order; in ttm_pool_shrink()
[all …]
ttm_tt.c
99 ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL); in ttm_tt_alloc_page_directory()
108 ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) + in ttm_dma_tt_alloc_page_directory()
113 ttm->dma_address = (void *)(ttm->pages + ttm->num_pages); in ttm_dma_tt_alloc_page_directory()
119 ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address), in ttm_sg_tt_alloc_page_directory()
138 ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages; in ttm_tt_init_fields()
212 for (i = 0; i < ttm->num_pages; ++i) { in ttm_tt_swapin()
252 loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT; in ttm_tt_swapout()
268 for (i = 0; i < ttm->num_pages; ++i) { in ttm_tt_swapout()
288 return ttm->num_pages; in ttm_tt_swapout()
308 atomic_long_add(ttm->num_pages, &ttm_pages_allocated); in ttm_tt_populate()
[all …]
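
Several ttm_tt paths above reduce a byte size to a page count with the idiom ttm_tt_init_fields() uses: align the size up to a page boundary, then shift down. A standalone sketch with the page size fixed at 4 KiB for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long size = 10000;    /* object size in bytes */
    unsigned long num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

    printf("%lu bytes -> %lu pages\n", size, num_pages);    /* 3 pages */
    return 0;
}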
ttm_device.c
69 unsigned long num_pages, num_dma32; in ttm_global_init() local
87 num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT; in ttm_global_init()
88 num_pages /= 2; in ttm_global_init()
95 ttm_pool_mgr_init(num_pages); in ttm_global_init()
96 ttm_tt_mgr_init(num_pages, num_dma32); in ttm_global_init()
159 uint32_t num_pages; in ttm_device_swapout() local
164 num_pages = PFN_UP(bo->base.size); in ttm_device_swapout()
168 return num_pages; in ttm_device_swapout()
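
ttm_global_init() sets its default page limit to half of system RAM, computed from si.totalram and si.mem_unit. sysinfo(2) exposes the same fields to user space, so the arithmetic can be reproduced directly; a 4 KiB page shift is assumed here:

#include <sys/sysinfo.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    struct sysinfo si;

    if (sysinfo(&si))
        return 1;

    uint64_t num_pages = ((uint64_t)si.totalram * si.mem_unit) >> 12;
    num_pages /= 2;    /* cap at half of RAM, as TTM does */

    printf("limit: %llu pages\n", (unsigned long long)num_pages);
    return 0;
}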
ttm_bo_util.c
90 u32 num_pages, in ttm_move_memcpy() argument
105 for (i = 0; i < num_pages; ++i) { in ttm_move_memcpy()
117 for (i = 0; i < num_pages; ++i) { in ttm_move_memcpy()
176 ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter); in ttm_bo_move_memcpy()
311 unsigned long num_pages, in ttm_bo_kmap_ttm() argument
329 if (num_pages == 1 && ttm->caching == ttm_cached) { in ttm_bo_kmap_ttm()
345 map->virtual = vmap(ttm->pages + start_page, num_pages, in ttm_bo_kmap_ttm()
352 unsigned long start_page, unsigned long num_pages, in ttm_bo_kmap() argument
360 if (num_pages > bo->resource->num_pages) in ttm_bo_kmap()
362 if ((start_page + num_pages) > bo->resource->num_pages) in ttm_bo_kmap()
[all …]
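
ttm_bo_kmap() validates the requested window twice: it must not be larger than the object, and it must not run past the object's end. A standalone sketch of the pattern with made-up sizes:

#include <stdio.h>

static int kmap_range_ok(unsigned long start_page, unsigned long num_pages,
                         unsigned long total_pages)
{
    if (num_pages > total_pages)
        return 0;
    if (start_page + num_pages > total_pages)
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", kmap_range_ok(2, 3, 8));    /* 1: pages 2..4 fit */
    printf("%d\n", kmap_range_ok(7, 3, 8));    /* 0: runs past end  */
    return 0;
}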
/linux-6.1.9/drivers/gpu/drm/vmwgfx/
vmwgfx_gmr.c
40 unsigned long num_pages, in vmw_gmr2_bind() argument
48 uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0); in vmw_gmr2_bind()
49 uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num; in vmw_gmr2_bind()
59 define_cmd.numPages = num_pages; in vmw_gmr2_bind()
74 while (num_pages > 0) { in vmw_gmr2_bind()
75 unsigned long nr = min_t(unsigned long, num_pages, VMW_PPN_PER_REMAP); in vmw_gmr2_bind()
95 num_pages -= nr; in vmw_gmr2_bind()
129 unsigned long num_pages, in vmw_gmr_bind() argument
142 return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); in vmw_gmr_bind()
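
vmw_gmr2_bind() sizes its command buffer with an open-coded ceiling division (the num_pages / N + (num_pages % N > 0) form, equivalent to DIV_ROUND_UP) and then consumes the pages in fixed-size batches. A standalone sketch of both steps; the batch size is made up, not the real VMW_PPN_PER_REMAP:

#include <stdio.h>

#define PER_REMAP 514    /* stand-in for VMW_PPN_PER_REMAP */

int main(void)
{
    unsigned long num_pages = 1200;
    unsigned long remap_num = num_pages / PER_REMAP +
                              ((num_pages % PER_REMAP) > 0);

    printf("%lu batches\n", remap_num);    /* ceil(1200/514) = 3 */

    while (num_pages > 0) {
        unsigned long nr = num_pages < PER_REMAP ? num_pages : PER_REMAP;

        printf("batch of %lu\n", nr);      /* 514, 514, 172 */
        num_pages -= nr;
    }
    return 0;
}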
vmwgfx_page_dirty.c
233 pgoff_t num_pages = vbo->base.resource->num_pages; in vmw_bo_dirty_add() local
242 size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long); in vmw_bo_dirty_add()
249 dirty->bitmap_size = num_pages; in vmw_bo_dirty_add()
253 if (num_pages < PAGE_SIZE / sizeof(pte_t)) { in vmw_bo_dirty_add()
262 wp_shared_mapping_range(mapping, offset, num_pages); in vmw_bo_dirty_add()
263 clean_record_shared_mapping_range(mapping, offset, num_pages, in vmw_bo_dirty_add()
398 if (unlikely(page_offset >= bo->resource->num_pages)) { in vmw_bo_vm_mkwrite()
441 if (page_offset >= bo->resource->num_pages || in vmw_bo_vm_fault()
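
vmw_bo_dirty_add() keeps one dirty bit per page in a bitmap appended to the tracking struct, rounding the bit count up to whole longs. A standalone sketch of the size calculation, with the struct layout simplified:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct dirty_tracker {
    unsigned long bitmap_size;    /* in pages */
    unsigned long bitmap[];       /* one bit per page */
};

int main(void)
{
    unsigned long num_pages = 1000;
    size_t size = sizeof(struct dirty_tracker) +
                  BITS_TO_LONGS(num_pages) * sizeof(long);

    printf("%lu pages -> %zu-byte tracker\n", num_pages, size);
    return 0;
}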
/linux-6.1.9/drivers/gpu/drm/xen/
xen_drm_front_gem.c
29 size_t num_pages; member
48 xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE); in gem_alloc_pages_array()
49 xen_obj->pages = kvmalloc_array(xen_obj->num_pages, in gem_alloc_pages_array()
94 ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages); in xen_drm_front_gem_object_mmap()
160 ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, in gem_create()
164 xen_obj->num_pages, ret); in gem_create()
176 xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE); in gem_create()
213 xen_free_unpopulated_pages(xen_obj->num_pages, in xen_drm_front_gem_free_object_unlocked()
241 xen_obj->pages, xen_obj->num_pages); in xen_drm_front_gem_get_sg_table()
266 xen_obj->num_pages); in xen_drm_front_gem_import_sg_table()
[all …]
/linux-6.1.9/drivers/xen/
xen-front-pgdir-shbuf.c
160 return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE); in get_num_pages_dir()
187 buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages; in guest_calc_num_grefs()
208 unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops), in backend_unmap()
213 for (i = 0; i < buf->num_pages; i++) { in backend_unmap()
222 buf->num_pages); in backend_unmap()
224 for (i = 0; i < buf->num_pages; i++) { in backend_unmap()
253 map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL); in backend_map()
257 buf->backend_map_handles = kcalloc(buf->num_pages, in backend_map()
271 grefs_left = buf->num_pages; in backend_map()
295 ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages); in backend_map()
[all …]
/linux-6.1.9/drivers/infiniband/sw/siw/
siw_mem.c
63 static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages, in siw_free_plist() argument
66 unpin_user_pages_dirty_lock(chunk->plist, num_pages, dirty); in siw_free_plist()
72 int i, num_pages = umem->num_pages; in siw_umem_release() local
74 for (i = 0; num_pages; i++) { in siw_umem_release()
75 int to_free = min_t(int, PAGES_PER_CHUNK, num_pages); in siw_umem_release()
80 num_pages -= to_free; in siw_umem_release()
82 atomic64_sub(umem->num_pages, &mm_s->pinned_vm); in siw_umem_release()
372 int num_pages, num_chunks, i, rv = 0; in siw_umem_get() local
381 num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT; in siw_umem_get()
382 num_chunks = (num_pages >> CHUNK_SHIFT) + 1; in siw_umem_get()
[all …]
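
siw_umem_release() walks a large page list in fixed-size chunks, using the remaining page count rather than the chunk index as the loop condition. A standalone sketch; the chunk size is made up, not the real PAGES_PER_CHUNK:

#include <stdio.h>

#define PAGES_PER_CHUNK 256    /* illustrative value */

int main(void)
{
    int i, num_pages = 600;

    for (i = 0; num_pages; i++) {
        int to_free = num_pages < PAGES_PER_CHUNK ?
                      num_pages : PAGES_PER_CHUNK;

        printf("chunk %d: release %d pages\n", i, to_free);
        num_pages -= to_free;    /* 256, 256, 88 */
    }
    return 0;
}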
/linux-6.1.9/tools/testing/scatterlist/
main.c
10 unsigned num_pages; member
40 printf("%u input PFNs:", test->num_pages); in fail()
41 for (i = 0; i < test->num_pages; i++) in fail()
87 int left_pages = test->pfn_app ? test->num_pages : 0; in main()
92 set_pages(pages, test->pfn, test->num_pages); in main()
96 &append, pages, test->num_pages, 0, test->size, in main()
100 &append.sgt, pages, test->num_pages, 0, in main()
109 set_pages(pages, test->pfn_app, test->num_pages); in main()
111 &append, pages, test->num_pages, 0, test->size, in main()
/linux-6.1.9/drivers/media/common/videobuf2/
videobuf2-dma-sg.c
49 unsigned int num_pages; member
107 int num_pages; in vb2_dma_sg_alloc() local
121 buf->num_pages = size >> PAGE_SHIFT; in vb2_dma_sg_alloc()
129 buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL); in vb2_dma_sg_alloc()
138 buf->num_pages, 0, size, GFP_KERNEL); in vb2_dma_sg_alloc()
162 __func__, buf->num_pages); in vb2_dma_sg_alloc()
169 num_pages = buf->num_pages; in vb2_dma_sg_alloc()
170 while (num_pages--) in vb2_dma_sg_alloc()
171 __free_page(buf->pages[num_pages]); in vb2_dma_sg_alloc()
183 int i = buf->num_pages; in vb2_dma_sg_put()
[all …]
/linux-6.1.9/net/ceph/
pagevec.c
13 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) in ceph_put_page_vector() argument
17 for (i = 0; i < num_pages; i++) { in ceph_put_page_vector()
26 void ceph_release_page_vector(struct page **pages, int num_pages) in ceph_release_page_vector() argument
30 for (i = 0; i < num_pages; i++) in ceph_release_page_vector()
39 struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags) in ceph_alloc_page_vector() argument
44 pages = kmalloc_array(num_pages, sizeof(*pages), flags); in ceph_alloc_page_vector()
47 for (i = 0; i < num_pages; i++) { in ceph_alloc_page_vector()
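
ceph_alloc_page_vector() allocates num_pages entries and, if any single allocation fails, releases the partial vector before returning an error. The same unwind shape with plain malloc standing in for alloc_page():

#include <stdlib.h>

static void **alloc_vector(int num_pages)
{
    void **pages = malloc(num_pages * sizeof(*pages));
    int i;

    if (!pages)
        return NULL;
    for (i = 0; i < num_pages; i++) {
        pages[i] = malloc(4096);
        if (!pages[i]) {
            while (i--)    /* unwind the pages already allocated */
                free(pages[i]);
            free(pages);
            return NULL;
        }
    }
    return pages;
}

int main(void)
{
    void **v = alloc_vector(8);
    int i;

    if (!v)
        return 1;
    for (i = 0; i < 8; i++)
        free(v[i]);
    free(v);
    return 0;
}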
/linux-6.1.9/arch/riscv/include/asm/
set_memory.h
21 int num_pages)) in set_kernel_memory() argument
25 int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT; in set_kernel_memory() local
27 return set_memory(start, num_pages); in set_kernel_memory()
37 int num_pages)) in set_kernel_memory() argument
/linux-6.1.9/drivers/tee/
tee_shm.c
65 unpin_user_pages(shm->pages, shm->num_pages); in release_registered_pages()
67 shm_put_kernel_pages(shm->pages, shm->num_pages); in release_registered_pages()
240 size_t num_pages; in register_shm_helper() local
269 num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE; in register_shm_helper()
270 shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); in register_shm_helper()
277 rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE, in register_shm_helper()
280 rc = shm_get_kernel_pages(start, num_pages, shm->pages); in register_shm_helper()
282 shm->num_pages = rc; in register_shm_helper()
283 if (rc != num_pages) { in register_shm_helper()
291 shm->num_pages, start); in register_shm_helper()
[all …]
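
register_shm_helper() counts the pages spanned by an arbitrarily aligned buffer: round the end up to a page boundary, subtract the rounded-down start, divide by the page size. A standalone sketch, assuming start is the page-aligned base of addr as in the surrounding code:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ROUND_DOWN(x) ((x) & ~(PAGE_SIZE - 1))
#define ROUND_UP(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long addr = 0x1ff0, length = 0x30;    /* straddles a boundary */
    unsigned long start = ROUND_DOWN(addr);
    unsigned long num_pages = (ROUND_UP(addr + length) - start) / PAGE_SIZE;

    printf("%lu pages\n", num_pages);    /* 2: covers 0x1000..0x2fff */
    return 0;
}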
/linux-6.1.9/arch/x86/platform/efi/
efi.c
127 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; in do_add_efi_memmap()
242 u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1; in efi_memmap_entry_valid()
246 if (md->num_pages == 0) { in efi_memmap_entry_valid()
248 } else if (md->num_pages > EFI_PAGES_MAX || in efi_memmap_entry_valid()
249 EFI_PAGES_MAX - md->num_pages < in efi_memmap_entry_valid()
251 end_hi = (md->num_pages & OVERFLOW_ADDR_MASK) in efi_memmap_entry_valid()
317 md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1, in efi_print_memmap()
318 (md->num_pages >> (20 - EFI_PAGE_SHIFT))); in efi_print_memmap()
500 prev_size = prev_md->num_pages << EFI_PAGE_SHIFT; in efi_merge_regions()
503 prev_md->num_pages += md->num_pages; in efi_merge_regions()
[all …]
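
efi_memmap_entry_valid() rejects descriptors whose num_pages is zero, exceeds EFI_PAGES_MAX, or would make phys_addr plus the region size wrap past 64 bits; the subtraction form checks for overflow without computing the overflowing sum. A standalone sketch (the EFI_PAGES_MAX value here is illustrative):

#include <stdio.h>
#include <stdint.h>

#define EFI_PAGE_SHIFT 12
#define EFI_PAGES_MAX  (UINT64_MAX >> EFI_PAGE_SHIFT)

static int entry_valid(uint64_t phys_addr, uint64_t num_pages)
{
    if (num_pages == 0)
        return 0;
    if (num_pages > EFI_PAGES_MAX ||
        EFI_PAGES_MAX - num_pages < (phys_addr >> EFI_PAGE_SHIFT))
        return 0;    /* end address would overflow a u64 */
    return 1;
}

int main(void)
{
    printf("%d\n", entry_valid(0x100000, 256));           /* 1 */
    printf("%d\n", entry_valid(UINT64_MAX - 4096, 2));    /* 0 */
    return 0;
}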
efi_32.c
43 size = md->num_pages << PAGE_SHIFT; in efi_map_region()
51 set_memory_uc((unsigned long)va, md->num_pages); in efi_map_region()
82 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) in efi_setup_page_tables() argument
139 set_memory_x(md->virt_addr, md->num_pages); in efi_runtime_update_mappings()
/linux-6.1.9/drivers/firmware/efi/
memmap.c
256 end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1; in efi_memmap_split_count()
321 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1; in efi_memmap_insert()
330 md->num_pages = (m_end - md->phys_addr + 1) >> in efi_memmap_insert()
337 md->num_pages = (end - md->phys_addr + 1) >> in efi_memmap_insert()
343 md->num_pages = (m_start - md->phys_addr) >> in efi_memmap_insert()
351 md->num_pages = (m_end - m_start + 1) >> in efi_memmap_insert()
358 md->num_pages = (end - m_end) >> in efi_memmap_insert()
365 md->num_pages = (m_start - md->phys_addr) >> in efi_memmap_insert()
372 md->num_pages = (end - md->phys_addr + 1) >> in efi_memmap_insert()
/linux-6.1.9/drivers/gpu/drm/gma500/
mmu.c
478 uint32_t num_pages, uint32_t desired_tile_stride, in psb_mmu_flush_ptes() argument
496 rows = num_pages / desired_tile_stride; in psb_mmu_flush_ptes()
498 desired_tile_stride = num_pages; in psb_mmu_flush_ptes()
526 unsigned long address, uint32_t num_pages) in psb_mmu_remove_pfn_sequence() argument
537 end = addr + (num_pages << PAGE_SHIFT); in psb_mmu_remove_pfn_sequence()
554 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_remove_pfn_sequence()
565 uint32_t num_pages, uint32_t desired_tile_stride, in psb_mmu_remove_pages() argument
579 rows = num_pages / desired_tile_stride; in psb_mmu_remove_pages()
581 desired_tile_stride = num_pages; in psb_mmu_remove_pages()
611 psb_mmu_flush_ptes(pd, f_address, num_pages, in psb_mmu_remove_pages()
[all …]
mmu.h
69 uint32_t num_pages);
73 uint32_t num_pages, int type);
78 unsigned long address, uint32_t num_pages,
82 unsigned long address, uint32_t num_pages,
/linux-6.1.9/drivers/gpu/drm/radeon/
radeon_ttm.c
147 unsigned num_pages; in radeon_move_blit() local
184 num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); in radeon_move_blit()
185 fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv); in radeon_move_blit()
271 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; in radeon_ttm_io_mem_reserve()
352 unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE; in radeon_ttm_tt_pin_userptr()
360 unsigned num_pages = ttm->num_pages - pinned; in radeon_ttm_tt_pin_userptr() local
364 r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0, in radeon_ttm_tt_pin_userptr()
371 } while (pinned < ttm->num_pages); in radeon_ttm_tt_pin_userptr()
373 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, in radeon_ttm_tt_pin_userptr()
374 (u64)ttm->num_pages << PAGE_SHIFT, in radeon_ttm_tt_pin_userptr()
[all …]
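
radeon_move_blit() converts CPU pages into GPU pages because the copy engine counts in RADEON_GPU_PAGE_SIZE units, which can be smaller than the CPU page size. A standalone sketch assuming 64 KiB CPU pages (as on some arm64/ppc64 configs) and 4 KiB GPU pages:

#include <stdio.h>

#define PAGE_SIZE            65536UL    /* assumed CPU page size */
#define RADEON_GPU_PAGE_SIZE 4096UL

int main(void)
{
    unsigned long cpu_pages = 10;
    unsigned long gpu_pages = cpu_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

    printf("%lu CPU pages -> %lu GPU pages\n", cpu_pages, gpu_pages);
    return 0;
}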
/linux-6.1.9/arch/x86/hyperv/
hv_proc.c
24 int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages) in hv_call_deposit_pages() argument
37 if (num_pages > HV_DEPOSIT_MAX) in hv_call_deposit_pages()
39 if (!num_pages) in hv_call_deposit_pages()
57 while (num_pages) { in hv_call_deposit_pages()
59 order = 31 - __builtin_clz(num_pages); in hv_call_deposit_pages()
75 num_pages -= counts[i]; in hv_call_deposit_pages()
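
hv_call_deposit_pages() carves the request into the largest power-of-two chunks available: for a nonzero 32-bit n, 31 - __builtin_clz(n) is floor(log2(n)). A standalone sketch of the loop (GCC/Clang builtin assumed):

#include <stdio.h>

int main(void)
{
    unsigned int num_pages = 300;

    while (num_pages) {
        int order = 31 - __builtin_clz(num_pages);
        unsigned int chunk = 1u << order;

        printf("deposit %u pages (order %d)\n", chunk, order);
        num_pages -= chunk;    /* 256, 32, 8, 4 */
    }
    return 0;
}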
/linux-6.1.9/fs/crypto/
bio.c
58 int num_pages = 0; in fscrypt_zeroout_range_inline_crypt() local
68 if (num_pages == 0) { in fscrypt_zeroout_range_inline_crypt()
78 num_pages++; in fscrypt_zeroout_range_inline_crypt()
82 if (num_pages == BIO_MAX_VECS || !len || in fscrypt_zeroout_range_inline_crypt()
88 num_pages = 0; in fscrypt_zeroout_range_inline_crypt()
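
fscrypt_zeroout_range_inline_crypt() accumulates pages into a bio until it reaches BIO_MAX_VECS or runs out of data, submits the batch, and resets the counter. The batch-and-flush shape in a standalone sketch with a made-up batch limit:

#include <stdio.h>

#define MAX_VECS 4    /* stand-in for BIO_MAX_VECS */

int main(void)
{
    int num_pages = 0, len = 10;    /* ten pages left to zero */

    while (len) {
        num_pages++;
        len--;
        if (num_pages == MAX_VECS || !len) {
            printf("submit bio with %d pages\n", num_pages);
            num_pages = 0;    /* start a new batch */
        }
    }
    return 0;
}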
/linux-6.1.9/drivers/gpu/drm/
drm_memory.c
61 unsigned long i, num_pages = in agp_remap() local
88 page_map = vmalloc(array_size(num_pages, sizeof(struct page *))); in agp_remap()
93 for (i = 0; i < num_pages; ++i) in agp_remap()
95 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); in agp_remap()
