
Searched refs:pages (Results 1 – 25 of 1302) sorted by relevance


/linux-6.6.21/drivers/iommu/iommufd/
pages.c
163 static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages) in iopt_pages_add_npinned() argument
167 rc = check_add_overflow(pages->npinned, npages, &pages->npinned); in iopt_pages_add_npinned()
169 WARN_ON(rc || pages->npinned > pages->npages); in iopt_pages_add_npinned()
172 static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages) in iopt_pages_sub_npinned() argument
176 rc = check_sub_overflow(pages->npinned, npages, &pages->npinned); in iopt_pages_sub_npinned()
178 WARN_ON(rc || pages->npinned > pages->npages); in iopt_pages_sub_npinned()
181 static void iopt_pages_err_unpin(struct iopt_pages *pages, in iopt_pages_err_unpin() argument
189 iopt_pages_sub_npinned(pages, npages); in iopt_pages_err_unpin()
249 static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages, in iopt_pages_find_domain_area() argument
254 node = interval_tree_iter_first(&pages->domains_itree, index, index); in iopt_pages_find_domain_area()
[all …]
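
The pin accounting above is built on the overflow-checked helpers from include/linux/overflow.h. A minimal sketch of that pattern, using a hypothetical pin_counter struct rather than the real iopt_pages layout:

    #include <linux/overflow.h>
    #include <linux/bug.h>

    struct pin_counter {            /* hypothetical, for illustration only */
        size_t npinned;
        size_t npages;
    };

    static void pin_counter_add(struct pin_counter *c, size_t npages)
    {
        /* check_add_overflow() stores the sum and returns true on wrap */
        int rc = check_add_overflow(c->npinned, npages, &c->npinned);

        WARN_ON(rc || c->npinned > c->npages);
    }

The invariant being asserted is the same as in iopt_pages_add_npinned(): the counter must neither wrap nor exceed the total page count.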
io_pagetable.c
23 struct iopt_pages *pages; member
42 if (!iter->area->pages) { in iopt_area_contig_init()
65 !iter->area->pages) { in iopt_area_contig_next()
190 struct iopt_pages *pages, unsigned long iova, in iopt_insert_area() argument
196 if ((iommu_prot & IOMMU_WRITE) && !pages->writable) in iopt_insert_area()
212 if (WARN_ON(area->pages_node.last >= pages->npages)) in iopt_insert_area()
263 (uintptr_t)elm->pages->uptr + elm->start_byte, length); in iopt_alloc_area_pages()
283 rc = iopt_insert_area(iopt, elm->area, elm->pages, iova, in iopt_alloc_area_pages()
298 WARN_ON(area->pages); in iopt_abort_area()
315 if (elm->pages) in iopt_free_pages_list()
[all …]
/linux-6.6.21/net/ceph/
pagevec.c
13 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) in ceph_put_page_vector() argument
19 set_page_dirty_lock(pages[i]); in ceph_put_page_vector()
20 put_page(pages[i]); in ceph_put_page_vector()
22 kvfree(pages); in ceph_put_page_vector()
26 void ceph_release_page_vector(struct page **pages, int num_pages) in ceph_release_page_vector() argument
31 __free_pages(pages[i], 0); in ceph_release_page_vector()
32 kfree(pages); in ceph_release_page_vector()
41 struct page **pages; in ceph_alloc_page_vector() local
44 pages = kmalloc_array(num_pages, sizeof(*pages), flags); in ceph_alloc_page_vector()
45 if (!pages) in ceph_alloc_page_vector()
[all …]
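
Ceph's page-vector helpers pair an allocator with a matching release. A minimal sketch of the lifecycle, assuming the signatures shown in the hits above (ceph_alloc_page_vector() returns an ERR_PTR on failure, not NULL):

    #include <linux/ceph/libceph.h>
    #include <linux/err.h>

    static int demo_page_vector(void)
    {
        int num_pages = 4;
        struct page **pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);

        if (IS_ERR(pages))
            return PTR_ERR(pages);

        /* ... fill or map the pages ... */

        /* frees both the pages and the vector array itself */
        ceph_release_page_vector(pages, num_pages);
        return 0;
    }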
/linux-6.6.21/mm/
percpu-vm.c
34 static struct page **pages; in pcpu_get_pages() local
35 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); in pcpu_get_pages()
39 if (!pages) in pcpu_get_pages()
40 pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL); in pcpu_get_pages()
41 return pages; in pcpu_get_pages()
55 struct page **pages, int page_start, int page_end) in pcpu_free_pages() argument
62 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
83 struct page **pages, int page_start, int page_end, in pcpu_alloc_pages() argument
93 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
104 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
[all …]
gup_test.c
10 static void put_back_pages(unsigned int cmd, struct page **pages, in put_back_pages() argument
19 put_page(pages[i]); in put_back_pages()
25 unpin_user_pages(pages, nr_pages); in put_back_pages()
29 unpin_user_pages(pages, nr_pages); in put_back_pages()
32 put_page(pages[i]); in put_back_pages()
39 static void verify_dma_pinned(unsigned int cmd, struct page **pages, in verify_dma_pinned() argument
50 folio = page_folio(pages[i]); in verify_dma_pinned()
69 static void dump_pages_test(struct gup_test *gup, struct page **pages, in dump_pages_test() argument
94 dump_page(pages[index_to_dump], in dump_pages_test()
106 struct page **pages; in __gup_test_ioctl() local
[all …]
gup.c
33 static inline void sanity_check_pinned_pages(struct page **pages, in sanity_check_pinned_pages() argument
51 for (; npages; npages--, pages++) { in sanity_check_pinned_pages()
52 struct page *page = *pages; in sanity_check_pinned_pages()
358 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
366 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
370 sanity_check_pinned_pages(pages, npages); in unpin_user_pages_dirty_lock()
372 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_dirty_lock()
443 static void unpin_user_pages_lockless(struct page **pages, unsigned long npages) in unpin_user_pages_lockless() argument
455 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_lockless()
469 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
[all …]
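
The unpin helpers above are the release half of the pin_user_pages*() family. A minimal sketch of the pin/use/unpin lifecycle for a writable user buffer (error handling trimmed; demo_pin_user_buffer is a hypothetical caller):

    #include <linux/mm.h>
    #include <linux/slab.h>

    static int demo_pin_user_buffer(unsigned long uaddr, unsigned long npages)
    {
        struct page **pages;
        int pinned;

        pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
            return -ENOMEM;

        pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
        if (pinned < 0) {
            kfree(pages);
            return pinned;
        }

        /* ... DMA or direct access to the pinned pages ... */

        /* marks the pages dirty before dropping the pin */
        unpin_user_pages_dirty_lock(pages, pinned, true);
        kfree(pages);
        return 0;
    }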
/linux-6.6.21/drivers/gpu/drm/i915/gem/selftests/
huge_gem_object.c
12 struct sg_table *pages) in huge_free_pages() argument
18 for_each_sgt_page(page, sgt_iter, pages) { in huge_free_pages()
24 sg_free_table(pages); in huge_free_pages()
25 kfree(pages); in huge_free_pages()
34 struct sg_table *pages; in huge_get_pages() local
41 pages = kmalloc(sizeof(*pages), GFP); in huge_get_pages()
42 if (!pages) in huge_get_pages()
45 if (sg_alloc_table(pages, npages, GFP)) { in huge_get_pages()
46 kfree(pages); in huge_get_pages()
50 sg = pages->sgl; in huge_get_pages()
[all …]
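
huge_get_pages() allocates an sg_table and then populates its entries one page at a time. A generic sketch of that allocate-then-populate pattern outside of i915 (the error path is simplified; real code must also free the pages already attached):

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static struct sg_table *demo_alloc_sgt(unsigned int npages)
    {
        struct sg_table *st;
        struct scatterlist *sg;
        unsigned int i;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
            return NULL;

        if (sg_alloc_table(st, npages, GFP_KERNEL)) {
            kfree(st);
            return NULL;
        }

        for_each_sg(st->sgl, sg, npages, i) {
            struct page *page = alloc_page(GFP_KERNEL);

            if (!page)
                goto err;   /* simplified: leaks pages set so far */
            sg_set_page(sg, page, PAGE_SIZE, 0);
        }
        return st;

    err:
        sg_free_table(st);
        kfree(st);
        return NULL;
    }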
/linux-6.6.21/fs/isofs/
compress.c
42 struct page **pages, unsigned poffset, in zisofs_uncompress_block() argument
68 if (!pages[i]) in zisofs_uncompress_block()
70 memzero_page(pages[i], 0, PAGE_SIZE); in zisofs_uncompress_block()
71 SetPageUptodate(pages[i]); in zisofs_uncompress_block()
121 if (pages[curpage]) { in zisofs_uncompress_block()
122 stream.next_out = kmap_local_page(pages[curpage]) in zisofs_uncompress_block()
174 if (pages[curpage]) { in zisofs_uncompress_block()
175 flush_dcache_page(pages[curpage]); in zisofs_uncompress_block()
176 SetPageUptodate(pages[curpage]); in zisofs_uncompress_block()
207 struct page **pages) in zisofs_fill_pages() argument
[all …]
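
zisofs streams its zlib output through short-lived kernel mappings. The kmap_local_page()/kunmap_local() pattern it uses, shown in isolation as a hedged sketch (demo_fill_page is hypothetical):

    #include <linux/highmem.h>
    #include <linux/mm.h>

    /* zero a page via a temporary local mapping, then publish it */
    static void demo_fill_page(struct page *page)
    {
        void *vaddr = kmap_local_page(page);

        memset(vaddr, 0, PAGE_SIZE);
        kunmap_local(vaddr);

        flush_dcache_page(page);
        SetPageUptodate(page);
    }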
/linux-6.6.21/fs/erofs/
pcpubuf.c
15 struct page **pages; member
64 struct page **pages, **oldpages; in erofs_pcpubuf_growsize() local
67 pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL); in erofs_pcpubuf_growsize()
68 if (!pages) { in erofs_pcpubuf_growsize()
74 pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL); in erofs_pcpubuf_growsize()
75 if (!pages[i]) { in erofs_pcpubuf_growsize()
77 oldpages = pages; in erofs_pcpubuf_growsize()
81 ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL); in erofs_pcpubuf_growsize()
84 oldpages = pages; in erofs_pcpubuf_growsize()
90 oldpages = pcb->pages; in erofs_pcpubuf_growsize()
[all …]
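
erofs_pcpubuf_growsize() stitches discontiguous pages into one virtually contiguous buffer with vmap(). A minimal sketch of just that step, with a hypothetical wrapper name:

    #include <linux/vmalloc.h>

    static void *demo_vmap_pages(struct page **pages, unsigned int nrpages)
    {
        /* VM_MAP + PAGE_KERNEL gives a normal writable kernel mapping */
        void *ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL);

        if (!ptr)
            return NULL;

        /* ... use ptr[0 .. nrpages * PAGE_SIZE) ... */
        return ptr;             /* release later with vunmap(ptr) */
    }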
/linux-6.6.21/drivers/xen/
xlate_mmu.c
48 static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn, in xen_for_each_gfn() argument
57 page = pages[i / XEN_PFN_PER_PAGE]; in xen_for_each_gfn()
71 struct page **pages; member
99 struct page *page = info->pages[info->index++]; in remap_pte_fn()
148 struct page **pages) in xen_xlate_remap_gfn_array() argument
163 data.pages = pages; in xen_xlate_remap_gfn_array()
184 int nr, struct page **pages) in xen_xlate_unmap_gfn_range() argument
186 xen_for_each_gfn(pages, nr, unmap_gfn, NULL); in xen_xlate_unmap_gfn_range()
217 struct page **pages; in xen_xlate_map_ballooned_pages() local
226 pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL); in xen_xlate_map_ballooned_pages()
[all …]
/linux-6.6.21/kernel/dma/
remap.c
15 return area->pages; in dma_common_find_pages()
22 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap() argument
27 vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, in dma_common_pages_remap()
30 find_vm_area(vaddr)->pages = pages; in dma_common_pages_remap()
42 struct page **pages; in dma_common_contiguous_remap() local
46 pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in dma_common_contiguous_remap()
47 if (!pages) in dma_common_contiguous_remap()
50 pages[i] = nth_page(page, i); in dma_common_contiguous_remap()
51 vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); in dma_common_contiguous_remap()
52 kvfree(pages); in dma_common_contiguous_remap()
/linux-6.6.21/drivers/gpu/drm/xen/
xen_drm_front_gem.c
29 struct page **pages; member
48 xen_obj->pages = kvmalloc_array(xen_obj->num_pages, in gem_alloc_pages_array()
50 return !xen_obj->pages ? -ENOMEM : 0; in gem_alloc_pages_array()
55 kvfree(xen_obj->pages); in gem_free_pages_array()
56 xen_obj->pages = NULL; in gem_free_pages_array()
92 ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages); in xen_drm_front_gem_object_mmap()
159 xen_obj->pages); in gem_create()
175 xen_obj->pages = drm_gem_get_pages(&xen_obj->base); in gem_create()
176 if (IS_ERR(xen_obj->pages)) { in gem_create()
177 ret = PTR_ERR(xen_obj->pages); in gem_create()
[all …]
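
drm_gem_get_pages() reports failure as an ERR_PTR-encoded pointer rather than NULL, which is why gem_create() goes through IS_ERR()/PTR_ERR(). A sketch of that checking pattern, assuming the usual drm_gem_put_pages() counterpart for release:

    #include <drm/drm_gem.h>
    #include <linux/err.h>

    static int demo_pin_gem_pages(struct drm_gem_object *obj)
    {
        struct page **pages = drm_gem_get_pages(obj);

        if (IS_ERR(pages))
            return PTR_ERR(pages);

        /* ... build mappings from the page array ... */

        /* dirty = true, accessed = false in this sketch */
        drm_gem_put_pages(obj, pages, true, false);
        return 0;
    }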
/linux-6.6.21/arch/s390/hypfs/
hypfs_diag.c
58 void *diag204_get_buffer(enum diag204_format fmt, int *pages) in diag204_get_buffer() argument
61 *pages = diag204_buf_pages; in diag204_get_buffer()
65 *pages = 1; in diag204_get_buffer()
67 *pages = diag204((unsigned long)DIAG204_SUBC_RSI | in diag204_get_buffer()
69 if (*pages <= 0) in diag204_get_buffer()
72 diag204_buf = __vmalloc_node(array_size(*pages, PAGE_SIZE), in diag204_get_buffer()
77 diag204_buf_pages = *pages; in diag204_get_buffer()
98 int pages, rc; in diag204_probe() local
100 buf = diag204_get_buffer(DIAG204_INFO_EXT, &pages); in diag204_probe()
103 (unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) { in diag204_probe()
[all …]
/linux-6.6.21/drivers/staging/media/ipu3/
ipu3-dmamap.c
20 static void imgu_dmamap_free_buffer(struct page **pages, in imgu_dmamap_free_buffer() argument
26 __free_page(pages[count]); in imgu_dmamap_free_buffer()
27 kvfree(pages); in imgu_dmamap_free_buffer()
36 struct page **pages; in imgu_dmamap_alloc_buffer() local
42 pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL); in imgu_dmamap_alloc_buffer()
44 if (!pages) in imgu_dmamap_alloc_buffer()
72 imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT); in imgu_dmamap_alloc_buffer()
77 pages[i++] = page++; in imgu_dmamap_alloc_buffer()
80 return pages; in imgu_dmamap_alloc_buffer()
100 struct page **pages; in imgu_dmamap_alloc() local
[all …]
/linux-6.6.21/drivers/media/common/videobuf2/
frame_vector.c
82 struct page **pages; in put_vaddr_frames() local
86 pages = frame_vector_pages(vec); in put_vaddr_frames()
92 if (WARN_ON(IS_ERR(pages))) in put_vaddr_frames()
95 unpin_user_pages(pages, vec->nr_frames); in put_vaddr_frames()
114 struct page **pages; in frame_vector_to_pages() local
122 pages = (struct page **)nums; in frame_vector_to_pages()
124 pages[i] = pfn_to_page(nums[i]); in frame_vector_to_pages()
140 struct page **pages; in frame_vector_to_pfns() local
144 pages = (struct page **)(vec->ptrs); in frame_vector_to_pfns()
145 nums = (unsigned long *)pages; in frame_vector_to_pfns()
[all …]
/linux-6.6.21/Documentation/admin-guide/mm/
hugetlbpage.rst
28 persistent hugetlb pages in the kernel's huge page pool. It also displays
30 and surplus huge pages in the pool of huge pages of default size.
46 is the size of the pool of huge pages.
48 is the number of huge pages in the pool that are not yet
51 is short for "reserved," and is the number of huge pages for
53 but no allocation has yet been made. Reserved huge pages
55 huge page from the pool of huge pages at fault time.
57 is short for "surplus," and is the number of huge pages in
59 maximum number of surplus huge pages is controlled by
61 Note: When the feature of freeing unused vmemmap pages associated
[all …]
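
The pool counters this document describes surface in /proc/meminfo as HugePages_Total, HugePages_Free, HugePages_Rsvd and HugePages_Surp. A small userspace sketch that prints them (minimal error handling):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/meminfo", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (!strncmp(line, "HugePages_", 10))
                fputs(line, stdout);
        fclose(f);
        return 0;
    }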
/linux-6.6.21/include/linux/
balloon_compaction.h
57 struct list_head pages; /* Pages enqueued & handled to Host */ member
67 struct list_head *pages);
69 struct list_head *pages, size_t n_req_pages);
75 INIT_LIST_HEAD(&balloon->pages); in balloon_devinfo_init()
97 list_add(&page->lru, &balloon->pages); in balloon_page_insert()
141 list_add(&page->lru, &balloon->pages); in balloon_page_insert()
164 static inline void balloon_page_push(struct list_head *pages, struct page *page) in balloon_page_push() argument
166 list_add(&page->lru, pages); in balloon_page_push()
176 static inline struct page *balloon_page_pop(struct list_head *pages) in balloon_page_pop() argument
178 struct page *page = list_first_entry_or_null(pages, struct page, lru); in balloon_page_pop()
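
balloon_page_push() and balloon_page_pop() implement a simple LIFO on page->lru, as the header excerpt above shows: push is list_add() at the head, pop is list_first_entry_or_null(). A sketch of building and draining a local list:

    #include <linux/balloon_compaction.h>
    #include <linux/gfp.h>

    static void demo_balloon_lifo(void)
    {
        LIST_HEAD(pages);
        struct page *page;

        /* enqueue: list_add() puts the page at the head ... */
        page = alloc_page(GFP_KERNEL);
        if (page)
            balloon_page_push(&pages, page);

        /* ... so pop returns the most recently pushed page first */
        while ((page = balloon_page_pop(&pages)))
            __free_page(page);
    }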
/linux-6.6.21/net/rds/
info.c
65 struct page **pages; member
122 iter->addr = kmap_atomic(*iter->pages); in rds_info_copy()
127 "bytes %lu\n", *iter->pages, iter->addr, in rds_info_copy()
140 iter->pages++; in rds_info_copy()
166 struct page **pages = NULL; in rds_info_getsockopt() local
190 pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); in rds_info_getsockopt()
191 if (!pages) { in rds_info_getsockopt()
195 ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages); in rds_info_getsockopt()
214 iter.pages = pages; in rds_info_getsockopt()
237 if (pages) in rds_info_getsockopt()
[all …]
/linux-6.6.21/Documentation/powerpc/
vmemmap_dedup.rst
14 With 2M PMD level mapping, we require 32 struct pages and a single 64K vmemmap
15 page can contain 1024 struct pages (64K/sizeof(struct page)). Hence there is no
18 With 1G PUD level mapping, we require 16384 struct pages and a single 64K
19 vmemmap page can contain 1024 struct pages (64K/sizeof(struct page)). Hence we
20 require 16 64K pages in vmemmap to map the struct page for 1G PUD level mapping.
46 With 4K page size, 2M PMD level mapping requires 512 struct pages and a single
47 4K vmemmap page contains 64 struct pages (4K/sizeof(struct page)). Hence we
48 require 8 4K pages in vmemmap to map the struct page for 2M pmd level mapping.
74 With 1G PUD level mapping, we require 262144 struct pages and a single 4K
75 vmemmap page can contain 64 struct pages (4K/sizeof(struct page)). Hence we
[all …]
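
All of the page counts quoted in this document follow from one ratio. With mapping size M, base-page size P_b, vmemmap page size P_v, and sizeof(struct page) = 64 bytes (the value the document's own 64K/64 = 1024 figure implies):

\[
N_{\text{vmemmap}} \;=\; \left\lceil \frac{(M/P_b)\cdot 64}{P_v} \right\rceil
\]

    64K base pages: 2M PMD -> ceil((32     * 64)/64K) = 1 vmemmap page
                    1G PUD -> ceil((16384  * 64)/64K) = 16 vmemmap pages
    4K base pages:  2M PMD -> ceil((512    * 64)/4K)  = 8 vmemmap pages
                    1G PUD -> ceil((262144 * 64)/4K)  = 4096 vmemmap pages

These match the counts stated in the excerpt above; dedup only pays off once more than one vmemmap page is needed.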
/linux-6.6.21/drivers/gpu/drm/
drm_scatter.c
51 for (i = 0; i < entry->pages; i++) { in drm_sg_cleanup()
83 unsigned long pages, i, j; in drm_legacy_sg_alloc() local
103 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; in drm_legacy_sg_alloc()
104 DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); in drm_legacy_sg_alloc()
106 entry->pages = pages; in drm_legacy_sg_alloc()
107 entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); in drm_legacy_sg_alloc()
113 entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); in drm_legacy_sg_alloc()
120 entry->virtual = vmalloc_32(pages << PAGE_SHIFT); in drm_legacy_sg_alloc()
131 memset(entry->virtual, 0, pages << PAGE_SHIFT); in drm_legacy_sg_alloc()
138 for (i = (unsigned long)entry->virtual, j = 0; j < pages; in drm_legacy_sg_alloc()
[all …]
/linux-6.6.21/drivers/gpu/drm/i915/gem/
i915_gem_pages.c
19 struct sg_table *pages) in __i915_gem_object_set_pages() argument
36 drm_clflush_sg(pages); in __i915_gem_object_set_pages()
40 obj->mm.get_page.sg_pos = pages->sgl; in __i915_gem_object_set_pages()
42 obj->mm.get_dma_page.sg_pos = pages->sgl; in __i915_gem_object_set_pages()
45 obj->mm.pages = pages; in __i915_gem_object_set_pages()
47 obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl); in __i915_gem_object_set_pages()
211 struct sg_table *pages; in __i915_gem_object_unset_pages() local
215 pages = fetch_and_zero(&obj->mm.pages); in __i915_gem_object_unset_pages()
216 if (IS_ERR_OR_NULL(pages)) in __i915_gem_object_unset_pages()
217 return pages; in __i915_gem_object_unset_pages()
[all …]
i915_gem_phys.c
101 struct sg_table *pages) in i915_gem_object_put_pages_phys() argument
103 dma_addr_t dma = sg_dma_address(pages->sgl); in i915_gem_object_put_pages_phys()
104 void *vaddr = sg_page(pages->sgl); in i915_gem_object_put_pages_phys()
106 __i915_gem_object_release_shmem(obj, pages, false); in i915_gem_object_put_pages_phys()
136 sg_free_table(pages); in i915_gem_object_put_pages_phys()
137 kfree(pages); in i915_gem_object_put_pages_phys()
147 void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; in i915_gem_object_pwrite_phys()
178 void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; in i915_gem_object_pread_phys()
197 struct sg_table *pages; in i915_gem_object_shmem_to_phys() local
200 pages = __i915_gem_object_unset_pages(obj); in i915_gem_object_shmem_to_phys()
[all …]
/linux-6.6.21/arch/m68k/mm/
sun3kmap.c
49 unsigned long type, int pages) in do_pmeg_mapin() argument
55 while(pages) { in do_pmeg_mapin()
59 pages--; in do_pmeg_mapin()
68 int pages; in sun3_ioremap() local
87 pages = size / PAGE_SIZE; in sun3_ioremap()
91 while(pages) { in sun3_ioremap()
95 if(seg_pages > pages) in sun3_ioremap()
96 seg_pages = pages; in sun3_ioremap()
100 pages -= seg_pages; in sun3_ioremap()
/linux-6.6.21/arch/arm64/include/asm/
tlbflush.h
159 #define __TLBI_RANGE_NUM(pages, scale) \ argument
160 ((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
367 #define __flush_tlb_range_op(op, start, pages, stride, \ argument
374 while (pages > 0) { \
376 pages % 2 == 1) { \
382 pages -= stride >> PAGE_SHIFT; \
386 num = __TLBI_RANGE_NUM(pages, scale); \
394 pages -= __TLBI_RANGE_PAGES(num, scale); \
400 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \ argument
401 __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
[all …]
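
The range encoding in __TLBI_RANGE_NUM() can be sanity-checked in isolation: NUM and scale round-trip through the page count. A userspace model of the two macros, assuming TLBI_RANGE_MASK is the 5-bit NUM field mask (0x1f):

    #include <assert.h>

    #define TLBI_RANGE_MASK 0x1fUL  /* 5-bit NUM field; assumption */

    /* mirrors __TLBI_RANGE_NUM(pages, scale) */
    static unsigned long range_num(unsigned long pages, int scale)
    {
        return ((pages >> (5 * scale + 1)) & TLBI_RANGE_MASK) - 1;
    }

    /* mirrors __TLBI_RANGE_PAGES(num, scale): pages one op covers */
    static unsigned long range_pages(unsigned long num, int scale)
    {
        return (num + 1) << (5 * scale + 1);
    }

    int main(void)
    {
        /* 512 pages at scale 1: NUM = (512 >> 6) - 1 = 7, and back */
        assert(range_num(512, 1) == 7);
        assert(range_pages(7, 1) == 512);
        return 0;
    }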
/linux-6.6.21/fs/squashfs/
file_direct.c
33 int i, n, pages, bytes, res = -ENOMEM; in squashfs_readpage_block() local
41 pages = end_index - start_index + 1; in squashfs_readpage_block()
43 page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL); in squashfs_readpage_block()
64 pages = i; in squashfs_readpage_block()
70 actor = squashfs_page_actor_init_special(msblk, page, pages, expected); in squashfs_readpage_block()
89 if (page[pages - 1]->index == end_index && bytes) { in squashfs_readpage_block()
90 pageaddr = kmap_local_page(page[pages - 1]); in squashfs_readpage_block()
96 for (i = 0; i < pages; i++) { in squashfs_readpage_block()
112 for (i = 0; i < pages; i++) { in squashfs_readpage_block()
