Lines matching references to 'page' in the hugetlb implementation (each entry: source line number, matching line, and enclosing function)
31 static struct page *alloc_hugetlb_page(void) in alloc_hugetlb_page()
34 struct page *page; in alloc_hugetlb_page() local
42 page = list_entry(htlbpage_freelist.next, struct page, list); in alloc_hugetlb_page()
43 list_del(&page->list); in alloc_hugetlb_page()
46 set_page_count(page, 1); in alloc_hugetlb_page()
48 clear_highpage(&page[i]); in alloc_hugetlb_page()
49 return page; in alloc_hugetlb_page()
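
A minimal sketch of alloc_hugetlb_page() reconstructed around the matches above. Only the freelist handling, set_page_count() and the clear_highpage() loop come from the listed lines; the htlbpage_lock spinlock and the htlbpagemem counter are assumptions about the file's surrounding declarations.

static struct page *alloc_hugetlb_page(void)
{
        int i;
        struct page *page;

        spin_lock(&htlbpage_lock);
        if (list_empty(&htlbpage_freelist)) {
                spin_unlock(&htlbpage_lock);
                return NULL;
        }
        /* Take the first huge page off the pool's free list. */
        page = list_entry(htlbpage_freelist.next, struct page, list);
        list_del(&page->list);
        htlbpagemem--;                          /* assumed free-page counter */
        spin_unlock(&htlbpage_lock);

        set_page_count(page, 1);
        /* Zero every base page that makes up the huge page. */
        for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
                clear_highpage(&page[i]);
        return page;
}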
89 struct page *page, pte_t * page_table, int write_access) in set_huge_pte() argument
96 pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); in set_huge_pte()
98 entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot)); in set_huge_pte()
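
A sketch of set_huge_pte() built around lines 96 and 98. The two mk_pte() variants are taken from the matches; the rss accounting, the pte_mkyoung()/mk_pte_huge() calls and the final set_pte() are assumptions consistent with how the entry must end up installed.

static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page *page, pte_t *page_table, int write_access)
{
        pte_t entry;

        mm->rss += (HPAGE_SIZE / PAGE_SIZE);    /* assumed accounting */
        if (write_access)
                /* Writable mapping: dirty and writable from the start. */
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        else
                /* Read-only mapping: write-protect the pte. */
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);             /* assumed */
        mk_pte_huge(entry);                     /* assumed: mark as a huge (PSE) mapping */
        set_pte(page_table, entry);
}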
186 struct page *ptepage; in copy_hugetlb_page_range()
209 struct page **pages, struct vm_area_struct **vmas, in follow_hugetlb_page()
216 struct page *page; in follow_hugetlb_page() local
224 page = pte_page(pte); in follow_hugetlb_page()
226 page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT); in follow_hugetlb_page()
227 pages[i] = page; in follow_hugetlb_page()
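
The interesting step in follow_hugetlb_page() is line 226: a huge page is backed by HPAGE_SIZE/PAGE_SIZE consecutive struct pages, so the base page covering a given user address is found by indexing from the head page returned by pte_page(). A hypothetical helper (hugepage_subpage is not in the source) showing just that arithmetic:

/* Given the head page of a huge page and a user virtual address, return
 * the struct page of the base page covering that address, as line 226
 * does before storing it into pages[i]. */
static struct page *hugepage_subpage(struct page *head, unsigned long addr)
{
        return head + ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
}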
243 void free_huge_page(struct page *page) in free_huge_page() argument
245 BUG_ON(page_count(page)); in free_huge_page()
246 BUG_ON(page->mapping); in free_huge_page()
248 INIT_LIST_HEAD(&page->list); in free_huge_page()
251 list_add(&page->list, &htlbpage_freelist); in free_huge_page()
256 void huge_page_release(struct page *page) in huge_page_release() argument
258 if (!put_page_testzero(page)) in huge_page_release()
261 free_huge_page(page); in huge_page_release()
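
A sketch of the release path covering both functions above. The sanity checks, list handling and put_page_testzero() call are from the listed lines; the htlbpage_lock spinlock and htlbpagemem counter are assumptions.

void free_huge_page(struct page *page)
{
        BUG_ON(page_count(page));       /* last reference must already be gone */
        BUG_ON(page->mapping);          /* must be off the page cache */

        INIT_LIST_HEAD(&page->list);

        spin_lock(&htlbpage_lock);
        list_add(&page->list, &htlbpage_freelist);
        htlbpagemem++;                  /* assumed free-page counter */
        spin_unlock(&htlbpage_lock);
}

void huge_page_release(struct page *page)
{
        /* Drop one reference; only the last dropper returns the page to the pool. */
        if (!put_page_testzero(page))
                return;
        free_huge_page(page);
}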
269 struct page *page; in unmap_hugepage_range() local
278 page = pte_page(*pte); in unmap_hugepage_range()
279 huge_page_release(page); in unmap_hugepage_range()
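
A sketch of unmap_hugepage_range(): the range is walked in HPAGE_SIZE steps and each mapped huge page has the reference taken at prefault time dropped via huge_page_release(). The huge_pte_offset() helper, the rss accounting and the TLB flush are assumptions; pte_page() and huge_page_release() are from the listed lines.

void unmap_hugepage_range(struct vm_area_struct *vma,
                          unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        struct page *page;

        BUG_ON(start & ~HPAGE_MASK);
        BUG_ON(end & ~HPAGE_MASK);

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = huge_pte_offset(mm, address);     /* assumed lookup helper */
                if (!pte || pte_none(*pte))
                        continue;
                page = pte_page(*pte);
                huge_page_release(page);
                pte_clear(pte);                         /* assumed */
        }
        mm->rss -= (end - start) >> PAGE_SHIFT;         /* assumed accounting */
        /* a TLB flush for the range would follow here */
}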
308 struct page *page; in hugetlb_prefault() local
319 page = find_get_page(mapping, idx); in hugetlb_prefault()
320 if (!page) { in hugetlb_prefault()
326 page = alloc_hugetlb_page(); in hugetlb_prefault()
327 if (!page) { in hugetlb_prefault()
332 add_to_page_cache(page, mapping, idx); in hugetlb_prefault()
333 unlock_page(page); in hugetlb_prefault()
335 set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE); in hugetlb_prefault()
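
A sketch of hugetlb_prefault(), which instantiates every huge page in the mapping at mmap() time so no hugetlb fault path is needed later. find_get_page(), alloc_hugetlb_page(), add_to_page_cache(), unlock_page() and set_huge_pte() appear in the listed lines; the signature, the index arithmetic, the huge_pte_alloc() helper and the error handling are assumptions, and locking is elided.

int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);  /* assumed helper */
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        break;
                }
                /* Page-cache index of this huge page within the backing file. */
                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* Not cached yet: take a fresh huge page from the pool. */
                        page = alloc_hugetlb_page();
                        if (!page) {
                                ret = -ENOMEM;
                                break;
                        }
                        add_to_page_cache(page, mapping, idx);
                        unlock_page(page);      /* add_to_page_cache() returns it locked */
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
        return ret;
}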
365 void update_and_free_page(struct page *page) in update_and_free_page() argument
368 struct page *map; in update_and_free_page()
370 map = page; in update_and_free_page()
378 set_page_count(page, 1); in update_and_free_page()
379 __free_pages(page, HUGETLB_PAGE_ORDER); in update_and_free_page()
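
A sketch of update_and_free_page(), which hands a huge page back to the buddy allocator when the pool shrinks. The final set_page_count()/__free_pages() pair is from lines 378-379; the per-subpage cleanup loop and the htlbzone_pages counter are assumptions.

static void update_and_free_page(struct page *page)
{
        int j;
        struct page *map = page;

        htlbzone_pages--;                       /* assumed pool-size counter */
        /* Reset every constituent base page before freeing the whole block. */
        for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
                ClearPageReserved(map);         /* assumed */
                set_page_count(map, 0);         /* assumed */
                map++;
        }
        set_page_count(page, 1);
        __free_pages(page, HUGETLB_PAGE_ORDER);
}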
385 struct page *page, *map; in try_to_free_low() local
398 page = list_entry(p, struct page, list); in try_to_free_low()
399 if ((page_zone(page))->name[0] != 'H') //Look for non-Highmem zones. in try_to_free_low()
400 map = page; in try_to_free_low()
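
The zone-name test on line 399 is how try_to_free_low() prefers to shrink the pool out of lowmem: zone names are "DMA", "Normal" and "HighMem", so any free huge page whose zone name does not start with 'H' lives in lowmem. A hypothetical helper (find_lowmem_huge_page is not in the source) showing just that test; the real function frees such pages as it scans:

static struct page *find_lowmem_huge_page(void)
{
        struct list_head *p;
        struct page *page;

        list_for_each(p, &htlbpage_freelist) {
                page = list_entry(p, struct page, list);
                if ((page_zone(page))->name[0] != 'H')  /* non-HighMem zone */
                        return page;
        }
        return NULL;
}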
415 struct page *page, *map; in set_hugetlb_mem_size() local
426 page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER); in set_hugetlb_mem_size()
427 if (page == NULL) in set_hugetlb_mem_size()
429 map = page; in set_hugetlb_mem_size()
435 list_add(&page->list, &htlbpage_freelist); in set_hugetlb_mem_size()
445 page = alloc_hugetlb_page(); in set_hugetlb_mem_size()
446 if (page == NULL) in set_hugetlb_mem_size()
449 update_and_free_page(page); in set_hugetlb_mem_size()
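
A sketch of set_hugetlb_mem_size(): the pool grows by pulling order-HUGETLB_PAGE_ORDER blocks straight from the buddy allocator and shrinks by draining pages back through update_and_free_page(). The alloc_pages(), list_add(), alloc_hugetlb_page() and update_and_free_page() calls are from the listed lines; the counters, locking, SetPageReserved() loop and return value are assumptions.

static int set_hugetlb_mem_size(int count)
{
        int j, lcount;
        struct page *page, *map;

        if (count < 0)
                count = 0;
        lcount = count - htlbzone_pages;        /* assumed pool-size counter */

        while (lcount > 0) {                    /* grow the pool */
                page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
                if (page == NULL)
                        break;
                map = page;
                for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
                        SetPageReserved(map);   /* assumed, mirrors hugetlb_init() */
                        map++;
                }
                spin_lock(&htlbpage_lock);
                list_add(&page->list, &htlbpage_freelist);
                htlbpagemem++;
                htlbzone_pages++;
                spin_unlock(&htlbpage_lock);
                lcount--;
        }
        while (lcount < 0) {                    /* shrink the pool */
                page = alloc_hugetlb_page();
                if (page == NULL)
                        break;
                update_and_free_page(page);
                lcount++;
        }
        return htlbzone_pages;
}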
473 struct page *page; in hugetlb_init() local
476 page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER); in hugetlb_init()
477 if (!page) in hugetlb_init()
480 SetPageReserved(&page[j]); in hugetlb_init()
482 list_add(&page->list, &htlbpage_freelist); in hugetlb_init()
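
A sketch of hugetlb_init(), which preallocates the boot-time pool and seeds htlbpage_freelist. The alloc_pages()/SetPageReserved()/list_add() core is from the listed lines; the htlbpage_max limit, the counters and the locking are assumptions.

static int __init hugetlb_init(void)
{
        int i, j;
        struct page *page;

        for (i = 0; i < htlbpage_max; i++) {    /* assumed boot-time limit */
                page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
                if (!page)
                        break;
                /* Mark every base page reserved so it is never reclaimed. */
                for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++)
                        SetPageReserved(&page[j]);
                spin_lock(&htlbpage_lock);
                list_add(&page->list, &htlbpage_freelist);
                spin_unlock(&htlbpage_lock);
        }
        htlbpage_max = htlbpagemem = htlbzone_pages = i;        /* assumed bookkeeping */
        return 0;
}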
509 static struct page *hugetlb_nopage(struct vm_area_struct * area, unsigned long address, int unused) in hugetlb_nopage()
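
Because hugetlb_prefault() above instantiates every page at mmap() time, the generic fault path should never reach a hugetlb VMA. A sketch of hugetlb_nopage() consistent with that design; only the signature is from line 509, the BUG() body is an assumption.

static struct page *hugetlb_nopage(struct vm_area_struct *area,
                                   unsigned long address, int unused)
{
        BUG();          /* all hugetlb pages are prefaulted; this must never run */
        return NULL;
}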