Lines Matching refs:mm
146 void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr) in clear_page_tables() argument
148 pgd_t * page_dir = mm->pgd; in clear_page_tables()
150 spin_lock(&mm->page_table_lock); in clear_page_tables()
156 spin_unlock(&mm->page_table_lock); in clear_page_tables()
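
The clear_page_tables() references above show the whole pgd walk happening under mm->page_table_lock. A rough reconstruction from those lines (free_one_pgd() is the static per-entry helper in memory.c; the exact body may differ in the tree this listing was generated from):

void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
{
        pgd_t *page_dir = mm->pgd;

        spin_lock(&mm->page_table_lock);
        page_dir += first;
        do {
                free_one_pgd(page_dir);         /* static helper: frees the pmd/pte levels below */
                page_dir++;
        } while (--nr);
        spin_unlock(&mm->page_table_lock);

        /* keep the per-arch page table cache within bounds */
        check_pgt_cache();
}
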
360 void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size) in zap_page_range() argument
367 dir = pgd_offset(mm, address); in zap_page_range()
378 spin_lock(&mm->page_table_lock); in zap_page_range()
379 flush_cache_range(mm, address, end); in zap_page_range()
380 tlb = tlb_gather_mmu(mm); in zap_page_range()
395 if (mm->rss > freed) in zap_page_range()
396 mm->rss -= freed; in zap_page_range()
398 mm->rss = 0; in zap_page_range()
399 spin_unlock(&mm->page_table_lock); in zap_page_range()
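
In zap_page_range() the lock covers the cache flush, the TLB-gather unmap and the rss update; rss is only an approximate counter, so it is clamped at zero rather than allowed to wrap (lines 395-398 above). A sketch of that pattern, with zap_pmd_range() standing for the static per-pmd unmapping helper in memory.c and sanity checks omitted:

void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
{
        mmu_gather_t *tlb;
        pgd_t *dir;
        unsigned long start = address, end = address + size;
        unsigned long freed = 0;

        dir = pgd_offset(mm, address);

        spin_lock(&mm->page_table_lock);
        flush_cache_range(mm, address, end);
        tlb = tlb_gather_mmu(mm);
        do {
                freed += zap_pmd_range(tlb, dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        tlb_finish_mmu(tlb, start, end);        /* flushes any remaining TLB entries */

        /* rss is approximate: clamp instead of wrapping below zero */
        if (mm->rss > freed)
                mm->rss -= freed;
        else
                mm->rss = 0;
        spin_unlock(&mm->page_table_lock);
}
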
405 static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write) in follow_page() argument
411 pgd = pgd_offset(mm, address); in follow_page()
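
follow_page() is the lookup used by get_user_pages() below: called with mm->page_table_lock held, it walks pgd/pmd/pte for one address and returns the struct page only if the pte is present and, for write access, already writable and dirty, so copy-on-write is never bypassed. A reconstruction from typical code of this era; the exact checks may differ:

static struct page *follow_page(struct mm_struct *mm, unsigned long address, int write)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                goto out;

        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                goto out;

        ptep = pte_offset(pmd, address);
        if (!ptep)
                goto out;

        pte = *ptep;
        if (pte_present(pte)) {
                /* for writes, require a writable, dirty pte so COW is not bypassed */
                if (!write || (pte_write(pte) && pte_dirty(pte)))
                        return pte_page(pte);
        }
out:
        return NULL;
}
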
454 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, in get_user_pages() argument
471 vma = find_extend_vma(mm, start); in get_user_pages()
476 spin_lock(&mm->page_table_lock); in get_user_pages()
479 while (!(map = follow_page(mm, start, write))) { in get_user_pages()
480 spin_unlock(&mm->page_table_lock); in get_user_pages()
481 switch (handle_mm_fault(mm, vma, start, write)) { in get_user_pages()
495 spin_lock(&mm->page_table_lock); in get_user_pages()
514 spin_unlock(&mm->page_table_lock); in get_user_pages()
524 spin_unlock(&mm->page_table_lock); in get_user_pages()
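
The get_user_pages() references show its central retry loop: look the page up with follow_page() under page_table_lock; if the lookup fails, drop the lock, let handle_mm_fault() bring the page in, then retake the lock and retry. A sketch of that inner loop (lines 476-495), with the surrounding declarations, vma handling and page bookkeeping omitted; the exact return-code handling varies by tree:

        spin_lock(&mm->page_table_lock);
        while (!(map = follow_page(mm, start, write))) {
                spin_unlock(&mm->page_table_lock);
                switch (handle_mm_fault(mm, vma, start, write)) {
                case 1:                                 /* minor fault serviced */
                case 2:                                 /* major fault serviced */
                        break;                          /* retry the lookup */
                case 0:
                        return i ? i : -EFAULT;         /* unrecoverable fault */
                default:
                        return i ? i : -ENOMEM;         /* out of memory */
                }
                spin_lock(&mm->page_table_lock);
        }
        /* ... record 'map' in the caller's pages[] array, advance start ... */
        spin_unlock(&mm->page_table_lock);
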
542 struct mm_struct * mm; in map_user_kiobuf() local
548 mm = current->mm; in map_user_kiobuf()
563 down_read(&mm->mmap_sem); in map_user_kiobuf()
565 err = get_user_pages(current, mm, va, pgcount, in map_user_kiobuf()
567 up_read(&mm->mmap_sem); in map_user_kiobuf()
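
map_user_kiobuf() is the caller's-eye view of the same interface: mm->mmap_sem is held for read around get_user_pages(), which takes page_table_lock internally. A sketch of that calling convention; 'pages' here stands in for the kiobuf's maplist array, and err/va/pgcount/rw come from the surrounding function:

        /* rw == READ means the device writes into user memory, hence the write flag */
        down_read(&mm->mmap_sem);
        err = get_user_pages(current, mm, va, pgcount,
                             (rw == READ), 0, pages, NULL);
        up_read(&mm->mmap_sem);
        if (err < 0)
                return err;
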
773 static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, in zeromap_pmd_range() argument
783 pte_t * pte = pte_alloc(mm, pmd, address); in zeromap_pmd_range()
800 struct mm_struct *mm = current->mm; in zeromap_page_range() local
802 dir = pgd_offset(mm, address); in zeromap_page_range()
803 flush_cache_range(mm, beg, end); in zeromap_page_range()
807 spin_lock(&mm->page_table_lock); in zeromap_page_range()
809 pmd_t *pmd = pmd_alloc(mm, dir, address); in zeromap_page_range()
813 error = zeromap_pmd_range(mm, pmd, address, end - address, prot); in zeromap_page_range()
819 spin_unlock(&mm->page_table_lock); in zeromap_page_range()
820 flush_tlb_range(mm, beg, end); in zeromap_page_range()
853 static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigne… in remap_pmd_range() argument
864 pte_t * pte = pte_alloc(mm, pmd, address); in remap_pmd_range()
881 struct mm_struct *mm = current->mm; in remap_page_range() local
884 dir = pgd_offset(mm, from); in remap_page_range()
885 flush_cache_range(mm, beg, end); in remap_page_range()
889 spin_lock(&mm->page_table_lock); in remap_page_range()
891 pmd_t *pmd = pmd_alloc(mm, dir, from); in remap_page_range()
895 error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot); in remap_page_range()
901 spin_unlock(&mm->page_table_lock); in remap_page_range()
902 flush_tlb_range(mm, beg, end); in remap_page_range()
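
zeromap_page_range() and remap_page_range() share one skeleton: pgd_offset() on the top level, then pmd_alloc() and the per-pmd helper under page_table_lock, with flush_cache_range() before the walk and flush_tlb_range() after it. A reconstruction of remap_page_range() along the lines referenced above (remap_pmd_range() is the static helper that fills the ptes; sanity checks omitted):

int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
        int error = 0;
        pgd_t *dir;
        unsigned long beg = from, end = from + size;
        struct mm_struct *mm = current->mm;

        phys_addr -= from;
        dir = pgd_offset(mm, from);
        flush_cache_range(mm, beg, end);

        spin_lock(&mm->page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(mm, dir, from);
                error = -ENOMEM;
                if (!pmd)
                        break;
                error = remap_pmd_range(mm, pmd, from, end - from,
                                        phys_addr + from, prot);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (from && (from < end));
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(mm, beg, end);
        return error;
}
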
952 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, in do_wp_page() argument
967 spin_unlock(&mm->page_table_lock); in do_wp_page()
976 spin_unlock(&mm->page_table_lock); in do_wp_page()
986 spin_lock(&mm->page_table_lock); in do_wp_page()
989 ++mm->rss; in do_wp_page()
997 spin_unlock(&mm->page_table_lock); in do_wp_page()
1003 spin_unlock(&mm->page_table_lock); in do_wp_page()
1014 struct mm_struct *mm = mpnt->vm_mm; in vmtruncate_list() local
1022 zap_page_range(mm, start, len); in vmtruncate_list()
1035 zap_page_range(mm, start, len); in vmtruncate_list()
1122 static int do_swap_page(struct mm_struct * mm, in do_swap_page() argument
1131 spin_unlock(&mm->page_table_lock); in do_swap_page()
1142 spin_lock(&mm->page_table_lock); in do_swap_page()
1144 spin_unlock(&mm->page_table_lock); in do_swap_page()
1160 spin_lock(&mm->page_table_lock); in do_swap_page()
1162 spin_unlock(&mm->page_table_lock); in do_swap_page()
1174 mm->rss++; in do_swap_page()
1186 spin_unlock(&mm->page_table_lock); in do_swap_page()
1195 static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table,… in do_anonymous_page() argument
1207 spin_unlock(&mm->page_table_lock); in do_anonymous_page()
1214 spin_lock(&mm->page_table_lock); in do_anonymous_page()
1217 spin_unlock(&mm->page_table_lock); in do_anonymous_page()
1220 mm->rss++; in do_anonymous_page()
1232 spin_unlock(&mm->page_table_lock); in do_anonymous_page()
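
do_anonymous_page() shows the recurring unlock/allocate/relock/re-check pattern: page_table_lock is dropped around alloc_page() (which may sleep), and after it is retaken the pte is re-tested with pte_none() before anything is installed, in case another thread serviced the fault meanwhile. A simplified sketch; LRU handling, cache flushing and update_mmu_cache() are omitted:

static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                             pte_t *page_table, int write_access, unsigned long addr)
{
        pte_t entry;

        /* read faults map the shared zero page read-only */
        entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));

        if (write_access) {
                struct page *page;

                /* drop the lock to allocate; alloc_page() may sleep */
                spin_unlock(&mm->page_table_lock);
                page = alloc_page(GFP_HIGHUSER);
                if (!page)
                        return -1;                      /* out of memory */
                clear_user_highpage(page, addr);

                spin_lock(&mm->page_table_lock);
                if (!pte_none(*page_table)) {
                        /* raced with another fault: back out */
                        page_cache_release(page);
                        spin_unlock(&mm->page_table_lock);
                        return 1;
                }
                mm->rss++;
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        }

        set_pte(page_table, entry);
        spin_unlock(&mm->page_table_lock);
        return 1;                                       /* minor fault */
}
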
1251 static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma, in do_no_page() argument
1258 return do_anonymous_page(mm, vma, page_table, write_access, address); in do_no_page()
1259 spin_unlock(&mm->page_table_lock); in do_no_page()
1284 spin_lock(&mm->page_table_lock); in do_no_page()
1298 ++mm->rss; in do_no_page()
1308 spin_unlock(&mm->page_table_lock); in do_no_page()
1314 spin_unlock(&mm->page_table_lock); in do_no_page()
1339 static inline int handle_pte_fault(struct mm_struct *mm, in handle_pte_fault() argument
1353 return do_no_page(mm, vma, address, write_access, pte); in handle_pte_fault()
1354 return do_swap_page(mm, vma, address, pte, entry, write_access); in handle_pte_fault()
1359 return do_wp_page(mm, vma, address, pte, entry); in handle_pte_fault()
1365 spin_unlock(&mm->page_table_lock); in handle_pte_fault()
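
handle_pte_fault() is the dispatcher behind these lines: it is entered with mm->page_table_lock held, hands not-present ptes to do_no_page() or do_swap_page() and write faults on read-only ptes to do_wp_page() (each of those drops the lock itself), and otherwise just refreshes the pte and drops the lock directly. A reconstruction; establish_pte() is the static helper that writes the pte and updates the MMU:

static inline int handle_pte_fault(struct mm_struct *mm,
        struct vm_area_struct *vma, unsigned long address,
        int write_access, pte_t *pte)
{
        pte_t entry = *pte;

        if (!pte_present(entry)) {
                if (pte_none(entry))
                        return do_no_page(mm, vma, address, write_access, pte);
                return do_swap_page(mm, vma, address, pte, entry, write_access);
        }

        if (write_access) {
                if (!pte_write(entry))
                        return do_wp_page(mm, vma, address, pte, entry);
                entry = pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
        establish_pte(vma, address, pte, entry);
        spin_unlock(&mm->page_table_lock);
        return 1;
}
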
1372 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma, in handle_mm_fault() argument
1379 pgd = pgd_offset(mm, address); in handle_mm_fault()
1385 spin_lock(&mm->page_table_lock); in handle_mm_fault()
1386 pmd = pmd_alloc(mm, pgd, address); in handle_mm_fault()
1389 pte_t * pte = pte_alloc(mm, pmd, address); in handle_mm_fault()
1391 return handle_pte_fault(mm, vma, address, write_access, pte); in handle_mm_fault()
1393 spin_unlock(&mm->page_table_lock); in handle_mm_fault()
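
handle_mm_fault() ties the allocators together: pgd_offset() runs outside the lock, pmd_alloc() and pte_alloc() run under it, and handle_pte_fault() is responsible for releasing the lock on every path, so it is only dropped here when allocation fails. A reconstruction from the lines above, minor details omitted:

int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                    unsigned long address, int write_access)
{
        pgd_t *pgd;
        pmd_t *pmd;

        pgd = pgd_offset(mm, address);

        spin_lock(&mm->page_table_lock);
        pmd = pmd_alloc(mm, pgd, address);
        if (pmd) {
                pte_t *pte = pte_alloc(mm, pmd, address);
                if (pte)
                        return handle_pte_fault(mm, vma, address, write_access, pte);
        }
        spin_unlock(&mm->page_table_lock);
        return -1;                              /* out of memory */
}
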
1406 pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in __pmd_alloc() argument
1411 new = pmd_alloc_one_fast(mm, address); in __pmd_alloc()
1413 spin_unlock(&mm->page_table_lock); in __pmd_alloc()
1414 new = pmd_alloc_one(mm, address); in __pmd_alloc()
1415 spin_lock(&mm->page_table_lock); in __pmd_alloc()
1429 pgd_populate(mm, pgd, new); in __pmd_alloc()
1440 pte_t fastcall *pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) in pte_alloc() argument
1446 new = pte_alloc_one_fast(mm, address); in pte_alloc()
1448 spin_unlock(&mm->page_table_lock); in pte_alloc()
1449 new = pte_alloc_one(mm, address); in pte_alloc()
1450 spin_lock(&mm->page_table_lock); in pte_alloc()
1464 pmd_populate(mm, pmd, new); in pte_alloc()
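
pte_alloc() and __pmd_alloc() follow the same shape: try the non-sleeping *_alloc_one_fast() while holding page_table_lock; if that fails, drop the lock, allocate with the sleeping *_alloc_one(), retake the lock, and re-check the entry before populating it, because another thread may have installed a table while the lock was dropped. A reconstruction of the pte case (the fastcall annotation is dropped here); __pmd_alloc() differs only in using pgd_populate():

pte_t *pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
        if (pmd_none(*pmd)) {
                pte_t *new;

                /* "fast" allocation does not sleep, so the lock can stay held */
                new = pte_alloc_one_fast(mm, address);
                if (!new) {
                        spin_unlock(&mm->page_table_lock);
                        new = pte_alloc_one(mm, address);       /* may sleep */
                        spin_lock(&mm->page_table_lock);
                        if (!new)
                                return NULL;
                        /* the lock was dropped: someone else may have populated the pmd */
                        if (!pmd_none(*pmd)) {
                                pte_free(new);
                                goto out;
                        }
                }
                pmd_populate(mm, pmd, new);
        }
out:
        return pte_offset(pmd, address);
}
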
1475 vma = find_vma(current->mm, addr); in make_pages_present()
1482 ret = get_user_pages(current, current->mm, addr, in make_pages_present()
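
The last two references show make_pages_present() reusing get_user_pages() on current->mm to fault in an address range; its callers hold mmap_sem. A reconstruction, with sanity checks on the vma and range omitted:

int make_pages_present(unsigned long addr, unsigned long end)
{
        int ret, len, write;
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        write = (vma->vm_flags & VM_WRITE) != 0;
        len = (end + PAGE_SIZE - 1) / PAGE_SIZE - addr / PAGE_SIZE;
        /* NULL pages/vmas: the pages only need to become resident, not pinned */
        ret = get_user_pages(current, current->mm, addr,
                             len, write, 0, NULL, NULL);
        return ret == len ? 0 : -1;
}
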