/linux-6.1.9/fs/verity/ |
D | verify.c |
    104  struct page *hpage;    in verify_page() local
    111  hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode, hindex,    in verify_page()
    113  if (IS_ERR(hpage)) {    in verify_page()
    114  err = PTR_ERR(hpage);    in verify_page()
    121  if (PageChecked(hpage)) {    in verify_page()
    122  memcpy_from_page(_want_hash, hpage, hoffset, hsize);    in verify_page()
    124  put_page(hpage);    in verify_page()
    131  hpages[level] = hpage;    in verify_page()
    141  struct page *hpage = hpages[level - 1];    in verify_page() local
    144  err = fsverity_hash_page(params, inode, req, hpage, real_hash);    in verify_page()
    [all …]
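The verify.c hits above are fs-verity's per-page verification path: verify_page() pulls Merkle-tree pages in via ->read_merkle_tree_page() and hashes them with fsverity_hash_page(). For context, a minimal userspace sketch of how verity gets turned on for a file in the first place, assuming a verity-capable filesystem (e.g. ext4 or f2fs with the verity feature) and the standard FS_IOC_ENABLE_VERITY ioctl from <linux/fsverity.h>:

    /* Minimal sketch: enable fs-verity on a file.  After this succeeds, reads
     * of the file's data go through fs/verity/verify.c:verify_page() against
     * the Merkle tree built by the ioctl.  Assumes a verity-capable filesystem
     * and <linux/fsverity.h> (Linux >= 5.4). */
    #include <fcntl.h>
    #include <linux/fsverity.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>

    int main(int argc, char **argv)
    {
        struct fsverity_enable_arg arg;
        int fd;

        if (argc != 2)
            return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
            return 1;

        memset(&arg, 0, sizeof(arg));
        arg.version = 1;
        arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
        arg.block_size = 4096;          /* Merkle tree block size */

        if (ioctl(fd, FS_IOC_ENABLE_VERITY, &arg) != 0) {
            perror("FS_IOC_ENABLE_VERITY");
            return 1;
        }
        return 0;   /* file is verity-protected and read-only from now on */
    }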
/linux-6.1.9/mm/ |
D | khugepaged.c |
    799  static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,    in hpage_collapse_alloc_page() argument
    802  *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);    in hpage_collapse_alloc_page()
    803  if (unlikely(!*hpage)) {    in hpage_collapse_alloc_page()
    808  prep_transhuge_page(*hpage);    in hpage_collapse_alloc_page()
    950  static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,    in alloc_charge_hpage() argument
    957  if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))    in alloc_charge_hpage()
    959  if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))    in alloc_charge_hpage()
    961  count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);    in alloc_charge_hpage()
    973  struct page *hpage;    in collapse_huge_page() local
    989  result = alloc_charge_hpage(&hpage, mm, cc);    in collapse_huge_page()
    [all …]
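collapse_huge_page() above allocates and memcg-charges the target huge page through alloc_charge_hpage()/hpage_collapse_alloc_page(). A small sketch of the userspace side that makes a region eligible for this collapse, assuming transparent hugepages are enabled in madvise (or always) mode:

    /* Sketch: make an anonymous region a candidate for khugepaged collapse.
     * With transparent_hugepage=madvise (or always), khugepaged may later
     * collapse these 4 KiB pages into a PMD-sized page via the
     * collapse_huge_page()/alloc_charge_hpage() path listed above. */
    #define _GNU_SOURCE
    #include <string.h>
    #include <sys/mman.h>

    #define REGION_SIZE (4UL << 20)  /* 4 MiB, so at least one PMD-aligned 2 MiB range fits */

    int main(void)
    {
        void *buf = mmap(NULL, REGION_SIZE, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;

        /* Ask for THP backing; khugepaged scans madvised VMAs. */
        if (madvise(buf, REGION_SIZE, MADV_HUGEPAGE) != 0)
            return 1;

        /* Touch the pages so there is something to collapse. */
        memset(buf, 0xa5, REGION_SIZE);
        return 0;
    }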
D | memory-failure.c |
    1081  struct page *hpage = compound_head(p);    in me_huge_page() local
    1085  if (!PageHuge(hpage))    in me_huge_page()
    1088  mapping = page_mapping(hpage);    in me_huge_page()
    1090  res = truncate_error_page(hpage, page_to_pfn(p), mapping);    in me_huge_page()
    1093  unlock_page(hpage);    in me_huge_page()
    1095  unlock_page(hpage);    in me_huge_page()
    1101  put_page(hpage);    in me_huge_page()
    1401  int flags, struct page *hpage)    in hwpoison_user_mappings() argument
    1403  struct folio *folio = page_folio(hpage);    in hwpoison_user_mappings()
    1409  bool mlocked = PageMlocked(hpage);    in hwpoison_user_mappings()
    [all …]
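These memory-failure.c hits are the hugepage branches of the hwpoison handler (me_huge_page(), hwpoison_user_mappings()). A hedged way to exercise that path from userspace is madvise(MADV_HWPOISON), which requires CAP_SYS_ADMIN and CONFIG_MEMORY_FAILURE; the hwpoison-inject.c entry below exposes the same injection by PFN through debugfs. Sketch:

    /* Sketch: software-inject a memory failure on one page of a mapping.
     * madvise(MADV_HWPOISON) needs CAP_SYS_ADMIN and CONFIG_MEMORY_FAILURE;
     * the kernel then runs the memory_failure() paths shown above
     * (hwpoison_user_mappings(), me_huge_page() for hugetlb pages). */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef MADV_HWPOISON
    #define MADV_HWPOISON 100   /* from include/uapi/asm-generic/mman-common.h */
    #endif

    int main(void)
    {
        long psz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        p[0] = 1;   /* fault the page in first */

        if (madvise(p, psz, MADV_HWPOISON) != 0) {
            perror("MADV_HWPOISON");    /* EPERM without CAP_SYS_ADMIN */
            return 1;
        }
        /* A subsequent access to p[0] would now raise SIGBUS. */
        return 0;
    }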
D | hwpoison-inject.c |
    18  struct page *hpage;    in hwpoison_inject() local
    28  hpage = compound_head(p);    in hwpoison_inject()
    33  shake_page(hpage);    in hwpoison_inject()
    37  if (!PageLRU(hpage) && !PageHuge(p) && !is_free_buddy_page(p))    in hwpoison_inject()
    45  err = hwpoison_filter(hpage);    in hwpoison_inject()
D | migrate.c |
    1251  struct page *hpage, int force,    in unmap_and_move_huge_page() argument
    1255  struct folio *dst, *src = page_folio(hpage);    in unmap_and_move_huge_page()
    1269  if (!hugepage_migration_supported(page_hstate(hpage)))    in unmap_and_move_huge_page()
    1274  putback_active_hugepage(hpage);    in unmap_and_move_huge_page()
    1278  new_hpage = get_new_page(hpage, private);    in unmap_and_move_huge_page()
    1301  if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {    in unmap_and_move_huge_page()
    1322  mapping = hugetlb_page_mapping_lock_write(hpage);    in unmap_and_move_huge_page()
    1351  move_hugetlb_state(hpage, new_hpage, reason);    in unmap_and_move_huge_page()
    1359  putback_active_hugepage(hpage);    in unmap_and_move_huge_page()
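unmap_and_move_huge_page() is the per-page worker used when hugetlb pages are migrated between NUMA nodes. A rough userspace trigger, assuming libnuma's <numaif.h> wrapper for the migrate_pages(2) syscall and at least two NUMA nodes (link with -lnuma):

    /* Sketch: ask the kernel to migrate this process's pages from node 0 to
     * node 1.  For hugetlb-backed mappings the per-page work ends up in
     * mm/migrate.c:unmap_and_move_huge_page(). */
    #include <numaif.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long from = 1UL << 0;  /* node 0 */
        unsigned long to   = 1UL << 1;  /* node 1 */

        /* maxnode is a bit count; one unsigned long covers both masks here */
        if (migrate_pages(0 /* self */, 8 * sizeof(unsigned long), &from, &to) < 0) {
            perror("migrate_pages");
            return 1;
        }
        return 0;
    }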
D | hugetlb.c |
    2069  struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)    in hugetlb_page_mapping_lock_write() argument
    2071  struct address_space *mapping = page_mapping(hpage);    in hugetlb_page_mapping_lock_write()
/linux-6.1.9/drivers/dma-buf/ |
D | udmabuf.c |
    179  struct page *page, *hpage = NULL;    in udmabuf_create() local
    237  if (!hpage) {    in udmabuf_create()
    238  hpage = find_get_page_flags(mapping, pgoff,    in udmabuf_create()
    240  if (!hpage) {    in udmabuf_create()
    245  page = hpage + subpgoff;    in udmabuf_create()
    249  put_page(hpage);    in udmabuf_create()
    250  hpage = NULL;    in udmabuf_create()
    266  if (hpage) {    in udmabuf_create()
    267  put_page(hpage);    in udmabuf_create()
    268  hpage = NULL;    in udmabuf_create()
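udmabuf_create() walks the backing memfd's page cache; for a hugetlb-backed memfd it looks up the compound head (hpage) once and then hands out its subpages. The userspace side is sketched below, assuming CONFIG_UDMABUF (/dev/udmabuf) and a reserved 2 MiB hugepage; udmabuf requires the memfd to be sealed against shrinking:

    /* Sketch: wrap a hugetlb-backed memfd in a dma-buf via /dev/udmabuf.
     * This is the userspace side of drivers/dma-buf/udmabuf.c:udmabuf_create().
     * Assumes CONFIG_UDMABUF and reserved 2 MiB hugepages
     * (e.g. via /proc/sys/vm/nr_hugepages). */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/udmabuf.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define BUF_SIZE (2UL << 20)    /* one 2 MiB hugepage */

    int main(void)
    {
        struct udmabuf_create create = { 0 };
        int devfd, memfd, dmabuf_fd;

        memfd = memfd_create("udmabuf-backing",
                             MFD_ALLOW_SEALING | MFD_HUGETLB);
        if (memfd < 0 || ftruncate(memfd, BUF_SIZE) != 0)
            return 1;
        /* udmabuf insists on F_SEAL_SHRINK. */
        if (fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) != 0)
            return 1;

        devfd = open("/dev/udmabuf", O_RDWR);
        if (devfd < 0)
            return 1;

        create.memfd  = memfd;
        create.offset = 0;
        create.size   = BUF_SIZE;
        dmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
        if (dmabuf_fd < 0) {
            perror("UDMABUF_CREATE");
            return 1;
        }
        /* dmabuf_fd can now be imported by a DRM/V4L2 driver. */
        return 0;
    }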
/linux-6.1.9/include/linux/ |
D | hugetlb.h |
    196  struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
    257  struct page *hpage)    in hugetlb_page_mapping_lock_write() argument
    734  static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)    in hugetlb_page_subpool() argument
    736  return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);    in hugetlb_page_subpool()
    739  static inline void hugetlb_set_page_subpool(struct page *hpage,    in hugetlb_set_page_subpool() argument
    742  set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);    in hugetlb_set_page_subpool()
    847  extern void hugetlb_clear_page_hwpoison(struct page *hpage);
    849  static inline void hugetlb_clear_page_hwpoison(struct page *hpage)    in hugetlb_clear_page_hwpoison() argument
    986  static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
/linux-6.1.9/io_uring/ |
D | rsrc.c |
    1066  int nr_pages, struct page *hpage)    in headpage_already_acct() argument
    1074  if (compound_head(pages[i]) == hpage)    in headpage_already_acct()
    1085  if (compound_head(imu->bvec[j].bv_page) == hpage)    in headpage_already_acct()
    1104  struct page *hpage;    in io_buffer_account_pin() local
    1106  hpage = compound_head(pages[i]);    in io_buffer_account_pin()
    1107  if (hpage == *last_hpage)    in io_buffer_account_pin()
    1109  *last_hpage = hpage;    in io_buffer_account_pin()
    1110  if (headpage_already_acct(ctx, pages, i, hpage))    in io_buffer_account_pin()
    1112  imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;    in io_buffer_account_pin()
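io_buffer_account_pin() and headpage_already_acct() ensure that a compound (huge) page pinned by several registered buffers is charged against RLIMIT_MEMLOCK only once, via its head page. A sketch of the userspace operation being accounted, assuming liburing (link with -luring):

    /* Sketch: register a fixed buffer with io_uring.  If the buffer is backed
     * by compound pages (THP or MAP_HUGETLB), the kernel-side accounting shown
     * above (io_buffer_account_pin()) charges each compound head only once. */
    #define _GNU_SOURCE
    #include <liburing.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/uio.h>

    #define BUF_SIZE (2UL << 20)    /* 2 MiB, one PMD-sized page if THP kicks in */

    int main(void)
    {
        struct io_uring ring;
        struct iovec iov;
        void *buf;

        buf = mmap(NULL, BUF_SIZE, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;
        madvise(buf, BUF_SIZE, MADV_HUGEPAGE);  /* best effort THP backing */
        memset(buf, 0, BUF_SIZE);

        if (io_uring_queue_init(8, &ring, 0) < 0)
            return 1;

        iov.iov_base = buf;
        iov.iov_len  = BUF_SIZE;
        if (io_uring_register_buffers(&ring, &iov, 1) < 0)
            return 1;

        /* ... submit READ_FIXED/WRITE_FIXED requests using buffer index 0 ... */
        io_uring_queue_exit(&ring);
        return 0;
    }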
/linux-6.1.9/arch/powerpc/kvm/ |
D | book3s_pr.c |
    647  struct page *hpage;    in kvmppc_patch_dcbz() local
    652  hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);    in kvmppc_patch_dcbz()
    653  if (is_error_page(hpage))    in kvmppc_patch_dcbz()
    660  get_page(hpage);    in kvmppc_patch_dcbz()
    661  page = kmap_atomic(hpage);    in kvmppc_patch_dcbz()
    669  put_page(hpage);    in kvmppc_patch_dcbz()
/linux-6.1.9/Documentation/virt/kvm/x86/ |
D | running-nested-guests.rst |
    171  .. note:: On s390x, the kernel parameter ``hpage`` is mutually exclusive
    173  ``nested``, the ``hpage`` parameter *must* be disabled.
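A small, hedged sketch of checking this constraint at runtime: it only reads the kvm module parameters from sysfs. The hpage parameter and its readable 0444 mode appear in the kvm-s390.c entry below; that nested is likewise a readable kvm module parameter on s390x is an assumption here. Note that kvm_s390_init() (kvm-s390.c line 5652 below) already refuses to load the module with both set.

    /* Sketch: report the s390x kvm module parameters relevant to the note
     * above.  Both sysfs files exist only on s390x with the kvm module
     * loaded; "nested" being a module parameter is assumed, not shown in
     * the listing. */
    #include <stdio.h>

    static int read_kvm_param(const char *name)
    {
        char path[128];
        int val = -1;
        FILE *f;

        snprintf(path, sizeof(path), "/sys/module/kvm/parameters/%s", name);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%d", &val) != 1)
            val = -1;
        fclose(f);
        return val;
    }

    int main(void)
    {
        int hpage = read_kvm_param("hpage");
        int nested = read_kvm_param("nested");

        printf("kvm.hpage=%d kvm.nested=%d\n", hpage, nested);
        if (nested == 1 && hpage == 1)
            fprintf(stderr, "hpage and nested are mutually exclusive on s390x\n");
        return 0;
    }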
/linux-6.1.9/arch/s390/kvm/ |
D | kvm-s390.c |
    193  static int hpage;    variable
    194  module_param(hpage, int, 0444);
    195  MODULE_PARM_DESC(hpage, "1m huge page backing support");
    587  if (hpage && !kvm_is_ucontrol(kvm))    in kvm_vm_ioctl_check_extension()
    829  else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))    in kvm_vm_ioctl_enable_cap()
    5652  if (nested && hpage) {    in kvm_s390_init()
/linux-6.1.9/Documentation/virt/kvm/ |
D | api.rst |
    7086  :Returns: 0 on success, -EINVAL if hpage module parameter was not set
    7094  hpage module parameter is not set to 1, -EINVAL is returned.
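These api.rst lines describe a KVM capability whose enablement fails with -EINVAL unless the s390 hpage module parameter is 1. The snippet is truncated, but the capability documented there is presumably KVM_CAP_S390_HPAGE_1M; a hedged sketch of enabling it on the VM file descriptor with KVM_ENABLE_CAP (s390 host assumed):

    /* Sketch: enable 1 MiB hugepage backing for an s390 KVM guest.
     * The capability is presumably KVM_CAP_S390_HPAGE_1M; enabling it fails
     * with -EINVAL when the kvm module was loaded with hpage=0
     * (see kvm_vm_ioctl_enable_cap() in the kvm-s390.c entry above). */
    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>

    int main(void)
    {
        struct kvm_enable_cap cap;
        int kvm_fd, vm_fd;

        kvm_fd = open("/dev/kvm", O_RDWR);
        if (kvm_fd < 0)
            return 1;
        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
        if (vm_fd < 0)
            return 1;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_S390_HPAGE_1M;
        if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) != 0) {
            perror("KVM_ENABLE_CAP(S390_HPAGE_1M)");    /* EINVAL if hpage=0 */
            return 1;
        }
        return 0;
    }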