Searched refs:nr_pages (Results 1 – 25 of 394) sorted by relevance

/linux-6.6.21/mm/
page_counter.c
49 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) in page_counter_cancel() argument
53 new = atomic_long_sub_return(nr_pages, &counter->usage); in page_counter_cancel()
56 new, nr_pages)) { in page_counter_cancel()
70 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) in page_counter_charge() argument
77 new = atomic_long_add_return(nr_pages, &c->usage); in page_counter_charge()
98 unsigned long nr_pages, in page_counter_try_charge() argument
119 new = atomic_long_add_return(nr_pages, &c->usage); in page_counter_try_charge()
121 atomic_long_sub(nr_pages, &c->usage); in page_counter_try_charge()
143 page_counter_cancel(c, nr_pages); in page_counter_try_charge()
153 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) in page_counter_uncharge() argument
[all …]
memory_hotplug.c
59 unsigned long nr_pages = PFN_UP(memory_block_memmap_size()); in memory_block_memmap_on_memory_pages() local
68 return pageblock_align(nr_pages); in memory_block_memmap_on_memory_pages()
69 return nr_pages; in memory_block_memmap_on_memory_pages()
299 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages) in check_pfn_span() argument
316 if (!IS_ALIGNED(pfn | nr_pages, min_align)) in check_pfn_span()
369 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, in __add_pages() argument
372 const unsigned long end_pfn = pfn + nr_pages; in __add_pages()
380 VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false)); in __add_pages()
387 || vmem_altmap_offset(altmap) > nr_pages) { in __add_pages()
394 if (check_pfn_span(pfn, nr_pages)) { in __add_pages()
[all …]
percpu-km.c
55 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_create_chunk() local
65 pages = alloc_pages(gfp, order_base_2(nr_pages)); in pcpu_create_chunk()
71 for (i = 0; i < nr_pages; i++) in pcpu_create_chunk()
78 pcpu_chunk_populated(chunk, 0, nr_pages); in pcpu_create_chunk()
89 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_destroy_chunk() local
98 __free_pages(chunk->data, order_base_2(nr_pages)); in pcpu_destroy_chunk()
109 size_t nr_pages, alloc_pages; in pcpu_verify_alloc_info() local
117 nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT; in pcpu_verify_alloc_info()
118 alloc_pages = roundup_pow_of_two(nr_pages); in pcpu_verify_alloc_info()
120 if (alloc_pages > nr_pages) in pcpu_verify_alloc_info()
[all …]
sparse.c
185 unsigned long nr_pages) in subsection_mask_set() argument
188 int end = subsection_map_index(pfn + nr_pages - 1); in subsection_mask_set()
193 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) in subsection_map_init() argument
195 int end_sec = pfn_to_section_nr(pfn + nr_pages - 1); in subsection_map_init()
198 if (!nr_pages) in subsection_map_init()
205 pfns = min(nr_pages, PAGES_PER_SECTION in subsection_map_init()
215 nr_pages -= pfns; in subsection_map_init()
219 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) in subsection_map_init() argument
429 unsigned long nr_pages, int nid, struct vmem_altmap *altmap, in __populate_section_memmap() argument
632 unsigned long nr_pages, int nid, struct vmem_altmap *altmap, in populate_section_memmap() argument
[all …]
gup_test.c
11 unsigned long nr_pages, unsigned int gup_test_flags) in put_back_pages() argument
18 for (i = 0; i < nr_pages; i++) in put_back_pages()
25 unpin_user_pages(pages, nr_pages); in put_back_pages()
29 unpin_user_pages(pages, nr_pages); in put_back_pages()
31 for (i = 0; i < nr_pages; i++) in put_back_pages()
40 unsigned long nr_pages) in verify_dma_pinned() argument
49 for (i = 0; i < nr_pages; i++) { in verify_dma_pinned()
70 unsigned long nr_pages) in dump_pages_test() argument
80 if (gup->which_pages[i] > nr_pages) { in dump_pages_test()
104 unsigned long i, nr_pages, addr, next; in __gup_test_ioctl() local
[all …]
hugetlb_cgroup.c
190 unsigned int nr_pages; in hugetlb_cgroup_move_parent() local
205 nr_pages = compound_nr(page); in hugetlb_cgroup_move_parent()
209 page_counter_charge(&parent->hugepage[idx], nr_pages); in hugetlb_cgroup_move_parent()
213 page_counter_cancel(counter, nr_pages); in hugetlb_cgroup_move_parent()
255 static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, in __hugetlb_cgroup_charge_cgroup() argument
282 nr_pages, &counter)) { in __hugetlb_cgroup_charge_cgroup()
298 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup() argument
301 return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false); in hugetlb_cgroup_charge_cgroup()
304 int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup_rsvd() argument
307 return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true); in hugetlb_cgroup_charge_cgroup_rsvd()
[all …]
process_vm_access.c
80 unsigned long nr_pages; in process_vm_rw_single_vec() local
89 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; in process_vm_rw_single_vec()
94 while (!rc && nr_pages && iov_iter_count(iter)) { in process_vm_rw_single_vec()
95 int pinned_pages = min(nr_pages, max_pages_per_loop); in process_vm_rw_single_vec()
122 nr_pages -= pinned_pages; in process_vm_rw_single_vec()
161 unsigned long nr_pages = 0; in process_vm_rw_core() local
177 nr_pages = max(nr_pages, nr_pages_iov); in process_vm_rw_core()
181 if (nr_pages == 0) in process_vm_rw_core()
184 if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) { in process_vm_rw_core()
188 sizeof(struct pages *)*nr_pages), in process_vm_rw_core()
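The page-span arithmetic in process_vm_rw_single_vec() above counts whole pages at both ends of the user buffer. As a worked example (assuming 4 KiB pages): addr = 0x1ff8 and len = 16 give (0x2007 / 0x1000) - (0x1ff8 / 0x1000) + 1 = 2 - 1 + 1 = 2 pages, since the 16-byte range straddles the 0x2000 page boundary.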
memcontrol.c
261 unsigned int nr_pages);
267 unsigned int nr_pages; in obj_cgroup_release() local
292 nr_pages = nr_bytes >> PAGE_SHIFT; in obj_cgroup_release()
294 if (nr_pages) in obj_cgroup_release()
295 obj_cgroup_uncharge_pages(objcg, nr_pages); in obj_cgroup_release()
470 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess() local
474 if (nr_pages > soft_limit) in soft_limit_excess()
475 excess = nr_pages - soft_limit; in soft_limit_excess()
939 int nr_pages) in mem_cgroup_charge_statistics() argument
942 if (nr_pages > 0) in mem_cgroup_charge_statistics()
[all …]
/linux-6.6.21/include/linux/
hugetlb_cgroup.h
138 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
140 extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
142 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
145 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
148 extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
150 extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
153 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
155 extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
163 unsigned long nr_pages,
173 unsigned long nr_pages, in hugetlb_cgroup_uncharge_file_region() argument
[all …]
page_counter.h
60 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
61 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
63 unsigned long nr_pages,
65 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
66 void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
67 void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
70 unsigned long nr_pages) in page_counter_set_high() argument
72 WRITE_ONCE(counter->high, nr_pages); in page_counter_set_high()
75 int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
77 unsigned long *nr_pages);
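A minimal caller sketch for the charge/uncharge API declared above; example_charge() is a hypothetical helper, not taken from the tree, and the accounting body is elided:

#include <linux/errno.h>
#include <linux/page_counter.h>

/* Hypothetical helper: charge nr_pages against a counter and back the
 * charge out again. page_counter_try_charge() returns false when a limit
 * in the counter hierarchy would be exceeded; 'fail' then points at the
 * counter that hit its maximum.
 */
static int example_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *fail;

	if (!page_counter_try_charge(counter, nr_pages, &fail))
		return -ENOMEM;

	/* ... pages are accounted against 'counter' here ... */

	page_counter_uncharge(counter, nr_pages);
	return 0;
}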
memory_hotplug.h
154 long nr_pages);
156 extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
158 extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
159 extern int online_pages(unsigned long pfn, unsigned long nr_pages,
188 extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
192 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
197 unsigned long nr_pages, struct mhp_params *params) in add_pages() argument
199 return __add_pages(nid, start_pfn, nr_pages, params); in add_pages()
202 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
308 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
[all …]
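As a rough sketch of how the add_pages()/__add_pages() pair above is typically reached, an arch_add_memory() implementation converts a physical range into a pfn plus nr_pages pair; example_arch_add_memory() is illustrative only, and real implementations also set up the linear mapping first:

#include <linux/memory_hotplug.h>
#include <linux/pfn.h>

static int example_arch_add_memory(int nid, u64 start, u64 size,
				   struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, start_pfn, nr_pages, params);
}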
/linux-6.6.21/fs/iomap/
swapfile.c
19 unsigned long nr_pages; /* number of pages collected */ member
33 unsigned long nr_pages; in iomap_swapfile_add_extent() local
40 if (unlikely(isi->nr_pages >= isi->sis->max)) in iomap_swapfile_add_extent()
42 max_pages = isi->sis->max - isi->nr_pages; in iomap_swapfile_add_extent()
55 nr_pages = next_ppage - first_ppage; in iomap_swapfile_add_extent()
56 nr_pages = min(nr_pages, max_pages); in iomap_swapfile_add_extent()
72 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage); in iomap_swapfile_add_extent()
76 isi->nr_pages += nr_pages; in iomap_swapfile_add_extent()
184 if (isi.nr_pages == 0) { in iomap_swapfile_activate()
190 sis->max = isi.nr_pages; in iomap_swapfile_activate()
[all …]
/linux-6.6.21/drivers/media/v4l2-core/
videobuf-dma-sg.c
63 int nr_pages) in videobuf_vmalloc_to_sg() argument
69 sglist = vzalloc(array_size(nr_pages, sizeof(*sglist))); in videobuf_vmalloc_to_sg()
72 sg_init_table(sglist, nr_pages); in videobuf_vmalloc_to_sg()
73 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { in videobuf_vmalloc_to_sg()
93 int nr_pages, int offset, size_t size) in videobuf_pages_to_sg() argument
100 sglist = vmalloc(array_size(nr_pages, sizeof(*sglist))); in videobuf_pages_to_sg()
103 sg_init_table(sglist, nr_pages); in videobuf_pages_to_sg()
111 for (i = 1; i < nr_pages; i++) { in videobuf_pages_to_sg()
173 dma->nr_pages = last-first+1; in videobuf_dma_init_user_locked()
174 dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *), in videobuf_dma_init_user_locked()
[all …]
/linux-6.6.21/drivers/xen/
balloon.c
386 static enum bp_state increase_reservation(unsigned long nr_pages) in increase_reservation() argument
392 if (nr_pages > ARRAY_SIZE(frame_list)) in increase_reservation()
393 nr_pages = ARRAY_SIZE(frame_list); in increase_reservation()
396 for (i = 0; i < nr_pages; i++) { in increase_reservation()
398 nr_pages = i; in increase_reservation()
406 rc = xenmem_reservation_increase(nr_pages, frame_list); in increase_reservation()
425 static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) in decrease_reservation() argument
433 if (nr_pages > ARRAY_SIZE(frame_list)) in decrease_reservation()
434 nr_pages = ARRAY_SIZE(frame_list); in decrease_reservation()
436 for (i = 0; i < nr_pages; i++) { in decrease_reservation()
[all …]
unpopulated-alloc.c
34 static int fill_list(unsigned int nr_pages) in fill_list() argument
39 unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); in fill_list()
158 int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages) in xen_alloc_unpopulated_pages() argument
169 return xen_alloc_ballooned_pages(nr_pages, pages); in xen_alloc_unpopulated_pages()
172 if (list_count < nr_pages) { in xen_alloc_unpopulated_pages()
173 ret = fill_list(nr_pages - list_count); in xen_alloc_unpopulated_pages()
178 for (i = 0; i < nr_pages; i++) { in xen_alloc_unpopulated_pages()
214 void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages) in xen_free_unpopulated_pages() argument
219 xen_free_ballooned_pages(nr_pages, pages); in xen_free_unpopulated_pages()
224 for (i = 0; i < nr_pages; i++) { in xen_free_unpopulated_pages()
/linux-6.6.21/arch/arm64/kvm/hyp/nvhe/
setup.c
38 unsigned long nr_pages; in divide_memory_pool() local
42 nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page)); in divide_memory_pool()
43 vmemmap_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
47 nr_pages = hyp_vm_table_pages(); in divide_memory_pool()
48 vm_table_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
52 nr_pages = hyp_s1_pgtable_pages(); in divide_memory_pool()
53 hyp_pgt_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
57 nr_pages = host_s2_pgtable_pages(); in divide_memory_pool()
58 host_s2_pgt_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
62 nr_pages = hyp_ffa_proxy_pages(); in divide_memory_pool()
[all …]
mem_protect.c
101 unsigned long nr_pages, pfn; in prepare_s2_pool() local
105 nr_pages = host_s2_pgtable_pages(); in prepare_s2_pool()
106 ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0); in prepare_s2_pool()
235 unsigned long nr_pages; in kvm_guest_prepare_stage2() local
238 nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT; in kvm_guest_prepare_stage2()
239 ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0); in kvm_guest_prepare_stage2()
544 u64 nr_pages; member
635 u64 size = tx->nr_pages * PAGE_SIZE; in host_request_owned_transition()
645 u64 size = tx->nr_pages * PAGE_SIZE; in host_request_unshare()
655 u64 size = tx->nr_pages * PAGE_SIZE; in host_initiate_share()
[all …]
/linux-6.6.21/include/xen/
xen.h
65 int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
66 void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
71 static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages, in xen_alloc_unpopulated_pages() argument
74 return xen_alloc_ballooned_pages(nr_pages, pages); in xen_alloc_unpopulated_pages()
76 static inline void xen_free_unpopulated_pages(unsigned int nr_pages, in xen_free_unpopulated_pages() argument
79 xen_free_ballooned_pages(nr_pages, pages); in xen_free_unpopulated_pages()
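A minimal usage sketch for the allocate/free pair declared above; example_use_unpopulated() is hypothetical and trims error handling to the essentials:

#include <linux/mm.h>
#include <linux/slab.h>
#include <xen/xen.h>

static int example_use_unpopulated(unsigned int nr_pages)
{
	struct page **pages;
	int ret;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (!ret) {
		/* ... map foreign frames into these backing pages ... */
		xen_free_unpopulated_pages(nr_pages, pages);
	}

	kfree(pages);
	return ret;
}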
/linux-6.6.21/drivers/firmware/efi/libstub/
relocate.c
28 unsigned long nr_pages; in efi_low_alloc_above() local
45 nr_pages = size / EFI_PAGE_SIZE; in efi_low_alloc_above()
60 if (desc->num_pages < nr_pages) in efi_low_alloc_above()
74 EFI_LOADER_DATA, nr_pages, &start); in efi_low_alloc_above()
120 unsigned long nr_pages; in efi_relocate_kernel() local
138 nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; in efi_relocate_kernel()
140 EFI_LOADER_DATA, nr_pages, &efi_addr); in efi_relocate_kernel()
/linux-6.6.21/net/rds/
info.c
163 unsigned long nr_pages = 0; in rds_info_getsockopt() local
187 nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) in rds_info_getsockopt()
190 pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); in rds_info_getsockopt()
195 ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages); in rds_info_getsockopt()
196 if (ret != nr_pages) { in rds_info_getsockopt()
198 nr_pages = ret; in rds_info_getsockopt()
200 nr_pages = 0; in rds_info_getsockopt()
205 rdsdebug("len %d nr_pages %lu\n", len, nr_pages); in rds_info_getsockopt()
238 unpin_user_pages(pages, nr_pages); in rds_info_getsockopt()
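The pin/unpin pattern above is self-contained enough to sketch; example_pin_buffer() is hypothetical and reuses the same page-count arithmetic:

#include <linux/mm.h>
#include <linux/slab.h>

static int example_pin_buffer(unsigned long start, size_t len)
{
	unsigned long nr_pages =
		(PAGE_ALIGN(start + len) - (start & PAGE_MASK)) >> PAGE_SHIFT;
	struct page **pages;
	int pinned;

	pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Pin for writing; on partial success release only what was pinned. */
	pinned = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
	if (pinned > 0)
		unpin_user_pages(pages, pinned);

	kfree(pages);
	return pinned < 0 ? pinned : 0;
}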
/linux-6.6.21/fs/crypto/
bio.c
119 unsigned int nr_pages; in fscrypt_zeroout_range() local
133 nr_pages = min_t(unsigned int, ARRAY_SIZE(pages), in fscrypt_zeroout_range()
143 for (i = 0; i < nr_pages; i++) { in fscrypt_zeroout_range()
149 nr_pages = i; in fscrypt_zeroout_range()
150 if (WARN_ON_ONCE(nr_pages <= 0)) in fscrypt_zeroout_range()
154 bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS); in fscrypt_zeroout_range()
179 } while (i != nr_pages && len != 0); in fscrypt_zeroout_range()
189 for (i = 0; i < nr_pages; i++) in fscrypt_zeroout_range()
/linux-6.6.21/arch/arm64/include/asm/
kvm_pkvm.h
29 unsigned long nr_pages = reg->size >> PAGE_SHIFT; in hyp_vmemmap_memblock_size() local
33 end = start + nr_pages * vmemmap_entry_size; in hyp_vmemmap_memblock_size()
57 static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages) in __hyp_pgtable_max_pages() argument
63 nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE); in __hyp_pgtable_max_pages()
64 total += nr_pages; in __hyp_pgtable_max_pages()
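The level-by-level estimate in __hyp_pgtable_max_pages() above shrinks by a factor of PTRS_PER_PTE per iteration; assuming 4 KiB pages (PTRS_PER_PTE = 512), mapping 262144 pages (1 GiB) needs DIV_ROUND_UP(262144, 512) = 512 table pages at the first level, then DIV_ROUND_UP(512, 512) = 1 at the next, and so on.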
/linux-6.6.21/kernel/events/
ring_buffer.c
175 if (rb->nr_pages) { in __perf_output_begin()
238 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); in __perf_output_begin()
334 if (!rb->nr_pages) in ring_buffer_init()
672 pgoff_t pgoff, int nr_pages, long watermark, int flags) in rb_alloc_aux() argument
687 watermark = nr_pages << (PAGE_SHIFT - 1); in rb_alloc_aux()
699 max_order = ilog2(nr_pages); in rb_alloc_aux()
707 if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER) in rb_alloc_aux()
709 rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL, in rb_alloc_aux()
715 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) { in rb_alloc_aux()
719 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); in rb_alloc_aux()
[all …]
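The index computation in __perf_output_begin() above relies on rb->nr_pages being a power of two so that the AND can stand in for a modulo; for instance, with nr_pages = 8 and page_shift = 12 (4 KiB pages), an offset of 0x9000 lands on page (0x9000 >> 12) & 7 = 1, wrapping around the ring.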
/linux-6.6.21/drivers/misc/bcm-vk/
bcm_vk_sg.c
60 dma->nr_pages = last - first + 1; in bcm_vk_dma_alloc()
63 dma->pages = kmalloc_array(dma->nr_pages, in bcm_vk_dma_alloc()
70 data, vkdata->size, dma->nr_pages); in bcm_vk_dma_alloc()
76 dma->nr_pages, in bcm_vk_dma_alloc()
79 if (err != dma->nr_pages) { in bcm_vk_dma_alloc()
80 dma->nr_pages = (err >= 0) ? err : 0; in bcm_vk_dma_alloc()
82 err, dma->nr_pages); in bcm_vk_dma_alloc()
87 dma->sglen = (dma->nr_pages * sizeof(*sgdata)) + in bcm_vk_dma_alloc()
117 for (i = 1; i < dma->nr_pages; i++) { in bcm_vk_dma_alloc()
250 for (i = 0; i < dma->nr_pages; i++) in bcm_vk_dma_free()
/linux-6.6.21/arch/powerpc/platforms/powernv/
memtrace.c
92 unsigned long nr_pages) in memtrace_clear_range() argument
97 for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { in memtrace_clear_range()
107 (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), in memtrace_clear_range()
113 const unsigned long nr_pages = PHYS_PFN(size); in memtrace_alloc_node() local
121 page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE | in memtrace_alloc_node()
132 memtrace_clear_range(start_pfn, nr_pages); in memtrace_alloc_node()
138 for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) in memtrace_alloc_node()
216 const unsigned long nr_pages = PHYS_PFN(size); in memtrace_free() local
225 for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) in memtrace_free()
228 free_contig_range(start_pfn, nr_pages); in memtrace_free()
