Home
last modified time | relevance | path

Searched refs: page_address (Results 1 – 25 of 514) sorted by relevance

Pages: 1 2 3 4 5 6 7 8 9 10 >> ... 21

/linux-6.6.21/fs/btrfs/
Daccessors.c30 token->kaddr = page_address(eb->pages[0]); in btrfs_init_map_token()
77 token->kaddr = page_address(token->eb->pages[idx]); \
83 token->kaddr = page_address(token->eb->pages[idx + 1]); \
94 char *kaddr = page_address(eb->pages[idx]); \
104 kaddr = page_address(eb->pages[idx + 1]); \
128 token->kaddr = page_address(token->eb->pages[idx]); \
136 token->kaddr = page_address(token->eb->pages[idx + 1]); \
146 char *kaddr = page_address(eb->pages[idx]); \
159 kaddr = page_address(eb->pages[idx + 1]); \
/linux-6.6.21/crypto/async_tx/
Draid6test.c40 get_random_bytes(page_address(data[i]), PAGE_SIZE); in makedata()
131 memset(page_address(recovi), 0xf0, PAGE_SIZE); in test_disks()
132 memset(page_address(recovj), 0xba, PAGE_SIZE); in test_disks()
139 erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE); in test_disks()
140 errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE); in test_disks()
167 memset(page_address(data[disks-2]), 0xee, PAGE_SIZE); in test()
168 memset(page_address(data[disks-1]), 0xee, PAGE_SIZE); in test()
/linux-6.6.21/include/linux/
Dhighmem-internal.h46 addr = page_address(page); in kmap()
170 return page_address(page); in kmap()
179 kunmap_flush_on_unmap(page_address(page)); in kunmap()
185 return page_address(page); in kmap_local_page()
190 return page_address(&folio->page) + offset; in kmap_local_folio()
217 return page_address(page); in kmap_atomic()
/linux-6.6.21/mm/kmsan/
Dshadow.c28 return page_address(shadow_page_for(page)); in shadow_ptr_for()
33 return page_address(origin_page_for(page)); in origin_ptr_for()
160 kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE, in kmsan_copy_page_meta()
186 __memset(page_address(shadow), 0, PAGE_SIZE * pages); in kmsan_alloc_page()
187 __memset(page_address(origin), 0, PAGE_SIZE * pages); in kmsan_alloc_page()
195 __memset(page_address(shadow), -1, PAGE_SIZE * pages); in kmsan_alloc_page()
204 ((depot_stack_handle_t *)page_address(origin))[i] = handle; in kmsan_alloc_page()
212 kmsan_internal_poison_memory(page_address(page), in kmsan_free_page()
/linux-6.6.21/arch/riscv/mm/
Dpageattr.c122 ptep_new = (pte_t *)page_address(pte_page); in __split_linear_mapping_pmd()
162 pmdp_new = (pmd_t *)page_address(pmd_page); in __split_linear_mapping_pud()
215 pudp_new = (pud_t *)page_address(pud_page); in __split_linear_mapping_p4d()
295 lm_start = (unsigned long)page_address(area->pages[i]); in __set_memory()
379 return __set_memory((unsigned long)page_address(page), 1, in set_direct_map_invalid_noflush()
385 return __set_memory((unsigned long)page_address(page), 1, in set_direct_map_default_noflush()
396 __set_memory((unsigned long)page_address(page), numpages, in __kernel_map_pages()
399 __set_memory((unsigned long)page_address(page), numpages, in __kernel_map_pages()
406 unsigned long addr = (unsigned long)page_address(page); in kernel_page_present()
/linux-6.6.21/arch/arm64/mm/
Dpageattr.c111 __change_memory_common((u64)page_address(area->pages[i]), in change_memory_common()
176 (unsigned long)page_address(page), in set_direct_map_invalid_noflush()
191 (unsigned long)page_address(page), in set_direct_map_default_noflush()
201 set_memory_valid((unsigned long)page_address(page), numpages, enable); in __kernel_map_pages()
220 unsigned long addr = (unsigned long)page_address(page); in kernel_page_present()
Dcopypage.c19 void *kto = page_address(to); in copy_highpage()
20 void *kfrom = page_address(from); in copy_highpage()
Dmteswap.c34 mte_save_page_tags(page_address(page), tag_storage); in mte_save_tags()
58 mte_restore_page_tags(page_address(page), tags); in mte_restore_tags()
/linux-6.6.21/security/selinux/
Dstatus.c52 status = page_address(selinux_state.status_page); in selinux_kernel_status_page()
85 status = page_address(selinux_state.status_page); in selinux_status_update_setenforce()
110 status = page_address(selinux_state.status_page); in selinux_status_update_policyload()
/linux-6.6.21/fs/nilfs2/
Ddir.c121 char *kaddr = page_address(page); in nilfs_check_page()
285 kaddr = page_address(page); in nilfs_readdir()
352 kaddr = page_address(page); in nilfs_find_entry()
395 (struct nilfs_dir_entry *)page_address(page)); in nilfs_dotdot()
420 unsigned int from = (char *)de - (char *)page_address(page); in nilfs_set_link()
467 kaddr = page_address(page); in nilfs_add_link()
504 from = (char *)de - (char *)page_address(page); in nilfs_add_link()
542 char *kaddr = page_address(page); in nilfs_delete_entry()
562 from = (char *)pde - (char *)page_address(page); in nilfs_delete_entry()
634 kaddr = page_address(page); in nilfs_empty_dir()
/linux-6.6.21/fs/ufs/
Ddir.c96 (char *) de - (char *) page_address(page); in ufs_set_link()
120 char *kaddr = page_address(page); in ufs_check_page()
242 (struct ufs_dir_entry *)page_address(page)); in ufs_dotdot()
286 kaddr = page_address(page); in ufs_find_entry()
343 kaddr = page_address(page); in ufs_add_link()
381 (char*)de - (char*)page_address(page); in ufs_add_link()
460 kaddr = page_address(page); in ufs_readdir()
506 char *kaddr = page_address(page); in ufs_delete_entry()
532 from = (char*)pde - (char*)page_address(page); in ufs_delete_entry()
571 base = (char*)page_address(page); in ufs_make_empty()
[all …]
/linux-6.6.21/arch/x86/kernel/
Dmachine_kexec_32.c105 control_page = page_address(image->control_code_page); in machine_kexec_prepare_page_tables()
139 set_memory_x((unsigned long)page_address(image->control_code_page), 1); in machine_kexec_prepare()
153 set_memory_nx((unsigned long)page_address(image->control_code_page), 1); in machine_kexec_cleanup()
197 control_page = page_address(image->control_code_page); in machine_kexec()
Despfix_64.c159 pmd_p = (pmd_t *)page_address(page); in init_espfix_ap()
171 pte_p = (pte_t *)page_address(page); in init_espfix_ap()
179 stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0)); in init_espfix_ap()
Dirq_32.c127 per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = page_address(ph); in irq_init_percpu_irqstack()
128 per_cpu(pcpu_hot.softirq_stack_ptr, cpu) = page_address(ps); in irq_init_percpu_irqstack()
/linux-6.6.21/net/ceph/
Dcls_lock_client.c56 p = page_address(lock_op_page); in ceph_cls_lock()
115 p = page_address(unlock_op_page); in ceph_cls_unlock()
168 p = page_address(break_op_page); in ceph_cls_break_lock()
217 p = page_address(cookie_op_page); in ceph_cls_set_cookie()
362 p = page_address(get_info_op_page); in ceph_cls_lock_info()
377 p = page_address(reply_page); in ceph_cls_lock_info()
415 p = page_address(pages[0]); in ceph_cls_assert_locked()
Dpagevec.c72 bad = copy_from_user(page_address(pages[i]) + po, data, l); in ceph_copy_user_to_page_vector()
98 memcpy(page_address(pages[i]) + po, data, l); in ceph_copy_to_page_vector()
121 memcpy(data, page_address(pages[i]) + po, l); in ceph_copy_from_page_vector()
/linux-6.6.21/drivers/gpu/drm/v3d/
Dv3d_mmu.c97 u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT; in v3d_mmu_insert_ptes() local
98 u32 pte = page_prot | page_address; in v3d_mmu_insert_ptes()
101 BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >= in v3d_mmu_insert_ptes()
/linux-6.6.21/mm/
Dhighmem.c271 if (page_address(page)) in map_new_virtual()
272 return (unsigned long)page_address(page); in map_new_virtual()
305 vaddr = (unsigned long)page_address(page); in kmap_high()
331 vaddr = (unsigned long)page_address(page); in kmap_high_get()
358 vaddr = (unsigned long)page_address(page); in kunmap_high()
574 return page_address(page); in __kmap_local_page_prot()
742 void *page_address(const struct page *page) in page_address() function
768 EXPORT_SYMBOL(page_address);
/linux-6.6.21/fs/minix/
Ddir.c111 kaddr = (char *)page_address(page); in minix_readdir()
180 kaddr = (char*)page_address(page); in minix_find_entry()
237 kaddr = (char*)page_address(page); in minix_add_link()
271 pos = page_offset(page) + p - (char *)page_address(page); in minix_add_link()
299 char *kaddr = page_address(page); in minix_delete_entry()
383 kaddr = (char *)page_address(page); in minix_empty_dir()
425 (char *)de-(char*)page_address(page); in minix_set_link()
451 de = minix_next_entry(page_address(page), sbi); in minix_dotdot()
/linux-6.6.21/drivers/mtd/devices/
Dblock2mtd.c70 max = page_address(page) + PAGE_SIZE; in _block2mtd_erase()
71 for (p=page_address(page); p<max; p++) in _block2mtd_erase()
74 memset(page_address(page), 0xff, PAGE_SIZE); in _block2mtd_erase()
124 memcpy(buf, page_address(page) + offset, cpylen); in block2mtd_read()
158 if (memcmp(page_address(page)+offset, buf, cpylen)) { in _block2mtd_write()
160 memcpy(page_address(page) + offset, buf, cpylen); in _block2mtd_write()
/linux-6.6.21/arch/arm/mm/
Dcopypage-v6.c80 discard_old_kernel_data(page_address(to)); in v6_copy_user_highpage_aliasing()
109 discard_old_kernel_data(page_address(page)); in v6_clear_user_highpage_aliasing()
/linux-6.6.21/fs/ntfs/
Dcompress.c89 u8 *kp = page_address(page); in zero_partial_compressed_page()
264 dp_addr = (u8*)page_address(dp) + do_sb_start; in ntfs_decompress()
741 clear_page(page_address(page)); in ntfs_read_compressed_block()
743 memset(page_address(page) + cur_ofs, 0, in ntfs_read_compressed_block()
765 memset(page_address(page) + cur_ofs, 0, in ntfs_read_compressed_block()
797 memcpy(page_address(page) + cur_ofs, cb_pos, in ntfs_read_compressed_block()
808 memcpy(page_address(page) + cur_ofs, cb_pos, in ntfs_read_compressed_block()
/linux-6.6.21/mm/kasan/
Dcommon.c110 kasan_unpoison(set_tag(page_address(page), tag), in __kasan_unpoison_pages()
121 kasan_poison(page_address(page), PAGE_SIZE << order, in __kasan_poison_pages()
132 kasan_poison(page_address(page), page_size(page), in __kasan_poison_slab()
252 if (ptr != page_address(virt_to_head_page(ptr))) { in ____kasan_kfree_large()
/linux-6.6.21/arch/powerpc/mm/
Ddma-noncoherent.c102 unsigned long start = (unsigned long)page_address(page) + offset; in __dma_sync_page()
121 unsigned long kaddr = (unsigned long)page_address(page); in arch_dma_prep_coherent()
/linux-6.6.21/kernel/power/
Dsnapshot.c61 static inline void hibernate_restore_protect_page(void *page_address) in hibernate_restore_protect_page() argument
64 set_memory_ro((unsigned long)page_address, 1); in hibernate_restore_protect_page()
67 static inline void hibernate_restore_unprotect_page(void *page_address) in hibernate_restore_unprotect_page() argument
70 set_memory_rw((unsigned long)page_address, 1); in hibernate_restore_unprotect_page()
75 static inline void hibernate_restore_protect_page(void *page_address) {} in hibernate_restore_protect_page() argument
76 static inline void hibernate_restore_unprotect_page(void *page_address) {} in hibernate_restore_unprotect_page() argument
101 unsigned long addr = (unsigned long)page_address(page); in hibernate_unmap_page()
237 static void recycle_safe_page(void *page_address) in recycle_safe_page() argument
239 struct linked_page *lp = page_address; in recycle_safe_page()
1465 zeros_only = do_copy_page(dst, page_address(s_page)); in safe_copy_page()
[all …]

Pages: 1 2 3 4 5 6 7 8 9 10 >> ... 21