/linux-6.1.9/mm/
page_counter.c
  in page_counter_cancel():
      49  void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
      53  new = atomic_long_sub_return(nr_pages, &counter->usage);
      56  new, nr_pages)) {
  in page_counter_charge():
      70  void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
      77  new = atomic_long_add_return(nr_pages, &c->usage);
  in page_counter_try_charge():
      98  unsigned long nr_pages,
     119  new = atomic_long_add_return(nr_pages, &c->usage);
     121  atomic_long_sub(nr_pages, &c->usage);
     143  page_counter_cancel(c, nr_pages);
  in page_counter_uncharge():
     153  void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
  [more matches not shown]
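These hits trace the usual charge/uncharge discipline around struct page_counter. A minimal caller sketch (illustrative only; the my_* wrapper names are invented here, while page_counter_try_charge() and page_counter_uncharge() are the 6.1 functions indexed above):

    #include <linux/errno.h>
    #include <linux/page_counter.h>

    /* Sketch: charge nr_pages against a counter hierarchy; backing the
     * charge out again is the caller's job, via my_release(). */
    static int my_charge(struct page_counter *counter, unsigned long nr_pages)
    {
            struct page_counter *fail;

            /* On failure, 'fail' points at the counter whose limit was hit. */
            if (!page_counter_try_charge(counter, nr_pages, &fail))
                    return -ENOMEM;
            return 0;
    }

    static void my_release(struct page_counter *counter, unsigned long nr_pages)
    {
            page_counter_uncharge(counter, nr_pages);
    }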
memory_hotplug.c
  in check_pfn_span():
     232  static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
     249  if (!IS_ALIGNED(pfn | nr_pages, min_align))
  in __add_pages():
     302  int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
     305  const unsigned long end_pfn = pfn + nr_pages;
     313  VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));
     320  || vmem_altmap_offset(altmap) > nr_pages) {
     327  if (check_pfn_span(pfn, nr_pages)) {
     328  WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1);
  in remove_pfn_range_from_zone():
     462  unsigned long nr_pages)
     464  const unsigned long end_pfn = start_pfn + nr_pages;
  [more matches not shown]
percpu-km.c
  in pcpu_create_chunk():
      55  const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
      65  pages = alloc_pages(gfp, order_base_2(nr_pages));
      71  for (i = 0; i < nr_pages; i++)
      78  pcpu_chunk_populated(chunk, 0, nr_pages);
  in pcpu_destroy_chunk():
      89  const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
      98  __free_pages(chunk->data, order_base_2(nr_pages));
  in pcpu_verify_alloc_info():
     109  size_t nr_pages, alloc_pages;
     117  nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
     118  alloc_pages = roundup_pow_of_two(nr_pages);
     120  if (alloc_pages > nr_pages)
  [more matches not shown]
sparse.c
  in subsection_mask_set():
     186  unsigned long nr_pages)
     189  int end = subsection_map_index(pfn + nr_pages - 1);
  in subsection_map_init():
     194  void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
     196  int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
     199  if (!nr_pages)
     206  pfns = min(nr_pages, PAGES_PER_SECTION
     216  nr_pages -= pfns;
     220  void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
  in __populate_section_memmap():
     430  unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
  in populate_section_memmap():
     633  unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
  [more matches not shown]
gup_test.c
  in put_back_pages():
      10  unsigned long nr_pages, unsigned int gup_test_flags)
      17  for (i = 0; i < nr_pages; i++)
      24  unpin_user_pages(pages, nr_pages);
      28  unpin_user_pages(pages, nr_pages);
      30  for (i = 0; i < nr_pages; i++)
  in verify_dma_pinned():
      39  unsigned long nr_pages)
      48  for (i = 0; i < nr_pages; i++) {
  in dump_pages_test():
      68  unsigned long nr_pages)
      78  if (gup->which_pages[i] > nr_pages) {
  in __gup_test_ioctl():
     102  unsigned long i, nr_pages, addr, next;
  [more matches not shown]
hugetlb_cgroup.c
  in hugetlb_cgroup_move_parent():
     190  unsigned int nr_pages;
     204  nr_pages = compound_nr(page);
     208  page_counter_charge(&parent->hugepage[idx], nr_pages);
     212  page_counter_cancel(counter, nr_pages);
  in __hugetlb_cgroup_charge_cgroup():
     254  static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
     281  nr_pages, &counter)) {
  in hugetlb_cgroup_charge_cgroup():
     297  int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
     300  return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
  in hugetlb_cgroup_charge_cgroup_rsvd():
     303  int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
     306  return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
  [more matches not shown]
process_vm_access.c
  in process_vm_rw_single_vec():
      80  unsigned long nr_pages;
      89  nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
      94  while (!rc && nr_pages && iov_iter_count(iter)) {
      95  int pinned_pages = min(nr_pages, max_pages_per_loop);
     122  nr_pages -= pinned_pages;
  in process_vm_rw_core():
     161  unsigned long nr_pages = 0;
     177  nr_pages = max(nr_pages, nr_pages_iov);
     181  if (nr_pages == 0)
     184  if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
     188  sizeof(struct pages *)*nr_pages),
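The expression at line 89 counts how many pages the byte range [addr, addr + len) touches: the page index of the last byte, minus the page index of the first byte, plus one (len must be nonzero). A standalone check of the same arithmetic (userspace sketch; the PAGE_SIZE value is assumed):

    #include <assert.h>

    #define PAGE_SIZE 4096UL

    /* pages spanned by [addr, addr + len), for len > 0 */
    static unsigned long span_pages(unsigned long addr, unsigned long len)
    {
            return (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
    }

    int main(void)
    {
            assert(span_pages(0x1000, 1) == 1);          /* inside one page */
            assert(span_pages(0x1ff0, 0x20) == 2);       /* straddles a boundary */
            assert(span_pages(0, 3 * PAGE_SIZE) == 3);   /* exact multiple */
            return 0;
    }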
page_ext.c
  in alloc_node_page_ext():
     217  unsigned long nr_pages;
     219  nr_pages = NODE_DATA(nid)->node_spanned_pages;
     220  if (!nr_pages)
     230  nr_pages += MAX_ORDER_NR_PAGES;
     232  table_size = page_ext_size * nr_pages;
  in online_page_ext():
     392  unsigned long nr_pages,
     399  end = SECTION_ALIGN_UP(start_pfn + nr_pages);
  in offline_page_ext():
     424  unsigned long nr_pages)
     429  end = SECTION_ALIGN_UP(start_pfn + nr_pages);
  in page_ext_callback():
     460  mn->nr_pages, mn->status_change_nid);
  [more matches not shown]
memcontrol.c
  at file scope:
     257  unsigned int nr_pages);
  in obj_cgroup_release():
     263  unsigned int nr_pages;
     288  nr_pages = nr_bytes >> PAGE_SHIFT;
     290  if (nr_pages)
     291  obj_cgroup_uncharge_pages(objcg, nr_pages);
  in soft_limit_excess():
     464  unsigned long nr_pages = page_counter_read(&memcg->memory);
     468  if (nr_pages > soft_limit)
     469  excess = nr_pages - soft_limit;
  in mem_cgroup_charge_statistics():
     915  int nr_pages)
     918  if (nr_pages > 0)
  [more matches not shown]
gup.c
  in __get_user_pages():
    1163  unsigned long start, unsigned long nr_pages,
    1171  if (!nr_pages)
    1206  &start, &nr_pages, i,
    1277  if (page_increm > nr_pages)
    1278  page_increm = nr_pages;
    1281  nr_pages -= page_increm;
    1282  } while (nr_pages);
  in __get_user_pages_locked():
    1403  unsigned long nr_pages,
    1437  ret = __get_user_pages(mm, start, nr_pages, flags, pages,
    1446  BUG_ON(ret >= nr_pages);
  [more matches not shown]
/linux-6.1.9/include/linux/
hugetlb_cgroup.h
  declarations:
     144  extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
     146  extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
     148  extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
     151  extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
     154  extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
     156  extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
     159  extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
     161  extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
     169  unsigned long nr_pages,
  in hugetlb_cgroup_uncharge_file_region():
     179  unsigned long nr_pages,
  [more matches not shown]
page_counter.h
  declarations:
      60  void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
      61  void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
      63  unsigned long nr_pages,
      65  void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
      66  void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
      67  void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
  in page_counter_set_high():
      70  unsigned long nr_pages)
      72  WRITE_ONCE(counter->high, nr_pages);
  declarations (continued):
      75  int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
      77  unsigned long *nr_pages);
memory_hotplug.h
  declarations:
     152  long nr_pages);
     154  extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
     156  extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
     157  extern int online_pages(unsigned long pfn, unsigned long nr_pages,
     186  extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
     190  extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
  in add_pages():
     195  unsigned long nr_pages, struct mhp_params *params)
     197  return __add_pages(nid, start_pfn, nr_pages, params);
  declarations (continued):
     200  int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
     306  extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
  [more matches not shown]
/linux-6.1.9/fs/iomap/
swapfile.c
  member of struct iomap_swapfile_info:
      19  unsigned long nr_pages; /* number of pages collected */
  in iomap_swapfile_add_extent():
      33  unsigned long nr_pages;
      40  if (unlikely(isi->nr_pages >= isi->sis->max))
      42  max_pages = isi->sis->max - isi->nr_pages;
      55  nr_pages = next_ppage - first_ppage;
      56  nr_pages = min(nr_pages, max_pages);
      72  error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
      76  isi->nr_pages += nr_pages;
  in iomap_swapfile_activate():
     184  if (isi.nr_pages == 0) {
     190  sis->max = isi.nr_pages;
  [more matches not shown]
/linux-6.1.9/drivers/media/v4l2-core/
videobuf-dma-sg.c
  in videobuf_vmalloc_to_sg():
      63  int nr_pages)
      69  sglist = vzalloc(array_size(nr_pages, sizeof(*sglist)));
      72  sg_init_table(sglist, nr_pages);
      73  for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
  in videobuf_pages_to_sg():
      93  int nr_pages, int offset, size_t size)
     100  sglist = vmalloc(array_size(nr_pages, sizeof(*sglist)));
     103  sg_init_table(sglist, nr_pages);
     111  for (i = 1; i < nr_pages; i++) {
  in videobuf_dma_init_user_locked():
     174  dma->nr_pages = last-first+1;
     175  dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *),
  [more matches not shown]
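The first group walks a vmalloc()ed buffer one PAGE_SIZE step at a time and builds a scatterlist from it. A condensed sketch of that conversion (assumes the buffer really came from vmalloc(), so vmalloc_to_page() can resolve every step; the function name here is invented):

    #include <linux/mm.h>
    #include <linux/overflow.h>
    #include <linux/scatterlist.h>
    #include <linux/vmalloc.h>

    static struct scatterlist *vmalloc_to_sglist(void *virt, int nr_pages)
    {
            struct scatterlist *sglist;
            struct page *pg;
            int i;

            sglist = vzalloc(array_size(nr_pages, sizeof(*sglist)));
            if (!sglist)
                    return NULL;
            sg_init_table(sglist, nr_pages);
            for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
                    pg = vmalloc_to_page(virt);
                    if (!pg)
                            goto err;      /* not a vmalloc page after all */
                    sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
            }
            return sglist;
    err:
            vfree(sglist);
            return NULL;
    }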
/linux-6.1.9/arch/arm64/kvm/
pkvm.c
  in kvm_hyp_reserve():
      56  u64 nr_pages, prev, hyp_mem_pages = 0;
      80  nr_pages = 0;
      82  prev = nr_pages;
      83  nr_pages = hyp_mem_pages + prev;
      84  nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
      86  nr_pages += __hyp_pgtable_max_pages(nr_pages);
      87  } while (nr_pages != prev);
      88  hyp_mem_pages += nr_pages;
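The do/while here is a fixed-point iteration: the hypervisor needs bookkeeping pages to describe its reservation, but those bookkeeping pages enlarge the very reservation they describe, so the count is recomputed until it stops changing. A self-contained userspace model of the same shape (the 16-byte per-page constant is made up for the example):

    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define PER_PAGE_BYTES  16UL    /* hypothetical bookkeeping bytes per page */

    int main(void)
    {
            unsigned long base = 1UL << 18;  /* payload pages to describe */
            unsigned long nr_pages = 0, prev;

            do {
                    prev = nr_pages;
                    /* metadata must cover the payload plus the metadata itself */
                    nr_pages = (((base + prev) * PER_PAGE_BYTES) + PAGE_SIZE - 1)
                               / PAGE_SIZE;
            } while (nr_pages != prev);

            printf("%lu payload pages need %lu metadata pages\n", base, nr_pages);
            return 0;
    }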
/linux-6.1.9/drivers/xen/
balloon.c
  in increase_reservation():
     404  static enum bp_state increase_reservation(unsigned long nr_pages)
     410  if (nr_pages > ARRAY_SIZE(frame_list))
     411  nr_pages = ARRAY_SIZE(frame_list);
     414  for (i = 0; i < nr_pages; i++) {
     416  nr_pages = i;
     424  rc = xenmem_reservation_increase(nr_pages, frame_list);
  in decrease_reservation():
     443  static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
     451  if (nr_pages > ARRAY_SIZE(frame_list))
     452  nr_pages = ARRAY_SIZE(frame_list);
     454  for (i = 0; i < nr_pages; i++) {
  [more matches not shown]
unpopulated-alloc.c
  in fill_list():
      34  static int fill_list(unsigned int nr_pages)
      39  unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
  in xen_alloc_unpopulated_pages():
     158  int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
     169  return xen_alloc_ballooned_pages(nr_pages, pages);
     172  if (list_count < nr_pages) {
     173  ret = fill_list(nr_pages - list_count);
     178  for (i = 0; i < nr_pages; i++) {
  in xen_free_unpopulated_pages():
     214  void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
     219  xen_free_ballooned_pages(nr_pages, pages);
     224  for (i = 0; i < nr_pages; i++) {
/linux-6.1.9/include/xen/
xen.h
  declarations:
      56  int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
      57  void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
  in xen_alloc_unpopulated_pages() (inline fallback):
      62  static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages,
      65  return xen_alloc_ballooned_pages(nr_pages, pages);
  in xen_free_unpopulated_pages() (inline fallback):
      67  static inline void xen_free_unpopulated_pages(unsigned int nr_pages,
      70  xen_free_ballooned_pages(nr_pages, pages);
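xen.h shows the common header idiom behind these hits: real prototypes when the option is configured, static inline fallbacks otherwise, so callers compile either way. A miniature of the same shape, with invented names:

    #include <linux/errno.h>

    struct page;

    #ifdef CONFIG_MY_FEATURE
    int my_feature_alloc(unsigned int nr_pages, struct page **pages);
    #else
    /* Stub keeps callers building when the feature is compiled out. */
    static inline int my_feature_alloc(unsigned int nr_pages, struct page **pages)
    {
            return -ENODEV;
    }
    #endif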
/linux-6.1.9/drivers/firmware/efi/libstub/
relocate.c
  in efi_low_alloc_above():
      28  unsigned long nr_pages;
      45  nr_pages = size / EFI_PAGE_SIZE;
      60  if (desc->num_pages < nr_pages)
      74  EFI_LOADER_DATA, nr_pages, &start);
  in efi_relocate_kernel():
     120  unsigned long nr_pages;
     138  nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
     140  EFI_LOADER_DATA, nr_pages, &efi_addr);
/linux-6.1.9/net/rds/
info.c
  in rds_info_getsockopt():
     163  unsigned long nr_pages = 0;
     187  nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
     190  pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
     195  ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
     196  if (ret != nr_pages) {
     198  nr_pages = ret;
     200  nr_pages = 0;
     205  rdsdebug("len %d nr_pages %lu\n", len, nr_pages);
     238  unpin_user_pages(pages, nr_pages);
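These hits follow the standard pin/unpin lifecycle: size the page array from the byte range, pin with pin_user_pages_fast(), and unpin everything on the way out. A sketch of the same pattern (the helper name is invented; the partial-pin cleanup mirrors the error handling above):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    static long pin_user_range(unsigned long start, unsigned long len,
                               struct page ***pagesp)
    {
            unsigned long nr_pages =
                    (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) >> PAGE_SHIFT;
            struct page **pages;
            long ret;

            pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
            if (ret != nr_pages) {
                    if (ret > 0)
                            unpin_user_pages(pages, ret);  /* partial pin */
                    kfree(pages);
                    return ret < 0 ? ret : -EFAULT;
            }
            *pagesp = pages;
            return nr_pages;
    }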
/linux-6.1.9/tools/testing/selftests/vm/
userfaultfd.c
  at file scope:
      68  static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size, hpage_size;
  in anon_release_pages():
     236  if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED))
  in anon_allocate_area():
     242  *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
  in hugetlb_release_pages():
     253  if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED))
     256  if (madvise(rel_area, nr_pages * page_size, MADV_REMOVE))
  in hugetlb_allocate_area():
     268  nr_pages * page_size,
     276  nr_pages * page_size,
     281  is_src ? 0 : nr_pages * page_size);
     287  nr_pages * page_size,
     291  is_src ? 0 : nr_pages * page_size);
  [more matches not shown]
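For the anon_* helpers the pattern is plain mmap() plus madvise(MADV_DONTNEED), so every page faults fresh on its next access, which is exactly what a userfaultfd test needs. A standalone userspace check of that behavior:

    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page_size = sysconf(_SC_PAGESIZE);
            unsigned long nr_pages = 16;
            char *area = mmap(NULL, nr_pages * page_size,
                              PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (area == MAP_FAILED)
                    return 1;
            area[0] = 42;   /* populate the first page */
            if (madvise(area, nr_pages * page_size, MADV_DONTNEED))
                    return 1;
            /* private anonymous memory reads back zero after DONTNEED */
            return area[0] == 0 ? 0 : 1;
    }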
/linux-6.1.9/fs/crypto/
bio.c
  in fscrypt_zeroout_range():
     121  unsigned int nr_pages;
     135  nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
     145  for (i = 0; i < nr_pages; i++) {
     151  nr_pages = i;
     152  if (WARN_ON(nr_pages <= 0))
     156  bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS);
     181  } while (i != nr_pages && len != 0);
     191  for (i = 0; i < nr_pages; i++)
/linux-6.1.9/drivers/misc/bcm-vk/
bcm_vk_sg.c
  in bcm_vk_dma_alloc():
      60  dma->nr_pages = last - first + 1;
      63  dma->pages = kmalloc_array(dma->nr_pages,
      70  data, vkdata->size, dma->nr_pages);
      76  dma->nr_pages,
      79  if (err != dma->nr_pages) {
      80  dma->nr_pages = (err >= 0) ? err : 0;
      82  err, dma->nr_pages);
      87  dma->sglen = (dma->nr_pages * sizeof(*sgdata)) +
     117  for (i = 1; i < dma->nr_pages; i++) {
  in bcm_vk_dma_free():
     250  for (i = 0; i < dma->nr_pages; i++)
/linux-6.1.9/arch/powerpc/platforms/powernv/
memtrace.c
  in memtrace_clear_range():
      92  unsigned long nr_pages)
      97  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
     107  (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
  in memtrace_alloc_node():
     113  const unsigned long nr_pages = PHYS_PFN(size);
     121  page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
     132  memtrace_clear_range(start_pfn, nr_pages);
     138  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
  in memtrace_free():
     216  const unsigned long nr_pages = PHYS_PFN(size);
     225  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
     228  free_contig_range(start_pfn, nr_pages);
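memtrace pairs alloc_contig_pages() with free_contig_range(); note the asymmetry that allocation hands back a struct page while freeing takes a PFN. A minimal pairing sketch (wrapper names invented; assumes CONFIG_CONTIG_ALLOC is enabled):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *grab_contig(unsigned long nr_pages, int nid)
    {
            /* May fail or stall; long contiguous runs are scarce on a busy node. */
            return alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE,
                                      nid, NULL);
    }

    static void drop_contig(struct page *page, unsigned long nr_pages)
    {
            free_contig_range(page_to_pfn(page), nr_pages);
    }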