Lines Matching refs:mm

154 struct mm_struct *mm = current->mm;  in sys_brk()  local
156 down_write(&mm->mmap_sem); in sys_brk()
158 if (brk < mm->end_code) in sys_brk()
161 oldbrk = PAGE_ALIGN(mm->brk); in sys_brk()
166 if (brk <= mm->brk) { in sys_brk()
167 if (!do_munmap(mm, newbrk, oldbrk-newbrk)) in sys_brk()
174 if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) in sys_brk()
178 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) in sys_brk()
189 mm->brk = brk; in sys_brk()
191 retval = mm->brk; in sys_brk()
192 up_write(&mm->mmap_sem); in sys_brk()
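
The sys_brk() hits above show its shape: take mmap_sem for writing, refuse any break below end_code, page-align the old and new break, shrink with do_munmap() when the new break is lower, and otherwise check RLIMIT_DATA and existing mappings before growing. A minimal user-space sketch of that decision structure follows; PAGE_SIZE, the rlim value and the helper names are stand-ins, not the kernel's:

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Hypothetical stand-ins for the per-mm fields used above. */
    struct mm_model {
        unsigned long start_data, end_code, brk;
    };

    /* Returns the resulting break, mimicking the grow/shrink branches
     * visible in the sys_brk() lines above. */
    static unsigned long brk_model(struct mm_model *mm, unsigned long brk,
                                   unsigned long rlim_data)
    {
        unsigned long newbrk, oldbrk;

        if (brk < mm->end_code)            /* never move below the code segment */
            return mm->brk;

        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)              /* same page: just record the value */
            goto set_brk;

        if (brk <= mm->brk)                /* shrinking: kernel does
                                              do_munmap(mm, newbrk, oldbrk - newbrk) */
            goto set_brk;

        if (brk - mm->start_data > rlim_data)   /* RLIMIT_DATA check */
            return mm->brk;

        /* The kernel also refuses if the gap is already mapped
         * (find_vma_intersection) and then maps it with do_brk(). */
    set_brk:
        mm->brk = brk;
        return mm->brk;
    }

    int main(void)
    {
        struct mm_model mm = { 0x1000, 0x1000, 0x2000 };
        printf("%#lx\n", brk_model(&mm, 0x5000, 1UL << 20));
        return 0;
    }
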
229 static void validate_mm(struct mm_struct * mm) { in validate_mm() argument
232 struct vm_area_struct * tmp = mm->mmap; in validate_mm()
237 if (i != mm->map_count) in validate_mm()
238 printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1; in validate_mm()
239 i = browse_rb(mm->mm_rb.rb_node); in validate_mm()
240 if (i != mm->map_count) in validate_mm()
241 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1; in validate_mm()
246 #define validate_mm(mm) do { } while (0) argument
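
Lines 229-246 are the usual debug/release split: with rb-tree debugging compiled in, validate_mm() walks both the vm_next list and the rb-tree and compares each count against map_count; otherwise the call compiles away to an empty do { } while (0). The same pattern, sketched in user space with a made-up config symbol (enable with -DDEBUG_MM_CHECKS):

    #include <stdio.h>

    struct vma { struct vma *vm_next; };
    struct mm_model { struct vma *mmap; int map_count; };

    #ifdef DEBUG_MM_CHECKS                 /* assumed symbol, not the kernel's */
    static void validate_mm(struct mm_model *mm)
    {
        int i = 0;
        struct vma *tmp = mm->mmap;

        while (tmp) {                      /* walk the vm_next list and count it */
            tmp = tmp->vm_next;
            i++;
        }
        if (i != mm->map_count)            /* compare against the cached count */
            fprintf(stderr, "map_count %d vm_next %d\n", mm->map_count, i);
    }
    #else
    #define validate_mm(mm) do { } while (0)   /* disappears in normal builds */
    #endif

    int main(void)
    {
        struct vma v = { 0 };
        struct mm_model mm = { &v, 1 };
        validate_mm(&mm);
        return 0;
    }
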
249 static struct vm_area_struct * find_vma_prepare(struct mm_struct * mm, unsigned long addr, in find_vma_prepare() argument
256 __rb_link = &mm->mm_rb.rb_node; in find_vma_prepare()
285 static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_ar… in __vma_link_list() argument
292 mm->mmap = vma; in __vma_link_list()
300 static inline void __vma_link_rb(struct mm_struct * mm, struct vm_area_struct * vma, in __vma_link_rb() argument
304 rb_insert_color(&vma->vm_rb, &mm->mm_rb); in __vma_link_rb()
332 static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct *… in __vma_link() argument
335 __vma_link_list(mm, vma, prev, rb_parent); in __vma_link()
336 __vma_link_rb(mm, vma, rb_link, rb_parent); in __vma_link()
340 static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_stru… in vma_link() argument
344 spin_lock(&mm->page_table_lock); in vma_link()
345 __vma_link(mm, vma, prev, rb_link, rb_parent); in vma_link()
346 spin_unlock(&mm->page_table_lock); in vma_link()
349 mm->map_count++; in vma_link()
350 validate_mm(mm); in vma_link()
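
find_vma_prepare(), __vma_link_list(), __vma_link_rb(), __vma_link() and vma_link() together show that every VMA is kept in two structures at once: the address-ordered vm_next list and the mm_rb red-black tree, with vma_link() doing both insertions under page_table_lock and then bumping map_count. A reduced sketch of the list half only (the rb-tree half is omitted; the struct layout is a stand-in, not the kernel's):

    struct vma_model {
        unsigned long vm_start, vm_end;
        struct vma_model *vm_next;
    };

    struct mm_model {
        struct vma_model *mmap;   /* head of the address-ordered list */
        int map_count;
    };

    /* Mirrors __vma_link_list(): insert after prev, or at the head when
     * prev is NULL (the new VMA becomes the lowest mapping). */
    static void vma_link_list(struct mm_model *mm, struct vma_model *vma,
                              struct vma_model *prev)
    {
        if (prev) {
            vma->vm_next = prev->vm_next;
            prev->vm_next = vma;
        } else {
            vma->vm_next = mm->mmap;
            mm->mmap = vma;
        }
        mm->map_count++;          /* vma_link() does this after both insertions */
    }
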
353 static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev, in vma_merge() argument
356 spinlock_t * lock = &mm->page_table_lock; in vma_merge()
369 __vma_unlink(mm, next, prev); in vma_merge()
372 mm->map_count--; in vma_merge()
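
vma_merge() (lines 353-372) tries to avoid allocating a new VMA by extending a compatible neighbour: if prev ends exactly where the new range starts and the flags are compatible, prev is stretched over the range, and if it then abuts the following VMA that one is unlinked and map_count is decremented under page_table_lock. A simplified model of the "extend prev, swallow next" step; the types and the flags test are placeholders for the kernel's fuller checks:

    struct vma_model {
        unsigned long vm_start, vm_end, vm_flags;
        struct vma_model *vm_next;
    };

    /* Try to cover [addr, end) by growing prev; returns 1 on success.
     * In the kernel this runs under page_table_lock and the swallowed
     * VMA is also removed from the rb-tree and freed. */
    static int vma_merge_model(struct vma_model *prev,
                               unsigned long addr, unsigned long end,
                               unsigned long flags, int *map_count)
    {
        struct vma_model *next;

        if (!prev || prev->vm_end != addr || prev->vm_flags != flags)
            return 0;

        prev->vm_end = end;                   /* grow prev over the new range */

        next = prev->vm_next;
        if (next && prev->vm_end == next->vm_start &&
            next->vm_flags == flags) {
            prev->vm_end = next->vm_end;      /* now also covers next ... */
            prev->vm_next = next->vm_next;    /* ... so unlink it */
            (*map_count)--;                   /* mm->map_count-- in the kernel */
        }
        return 1;
    }
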
399 struct mm_struct * mm = current->mm; in do_mmap_pgoff() local
427 if (mm->map_count > max_map_count) in do_mmap_pgoff()
441 vm_flags = calc_vm_flags(prot,flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in do_mmap_pgoff()
445 unsigned long locked = mm->locked_vm << PAGE_SHIFT; in do_mmap_pgoff()
493 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); in do_mmap_pgoff()
495 if (do_munmap(mm, addr, len)) in do_mmap_pgoff()
501 if ((mm->total_vm << PAGE_SHIFT) + len in do_mmap_pgoff()
513 if (vma_merge(mm, prev, rb_parent, addr, addr + len, vm_flags)) in do_mmap_pgoff()
524 vma->vm_mm = mm; in do_mmap_pgoff()
574 stale_vma = find_vma_prepare(mm, addr, &prev, in do_mmap_pgoff()
586 vma_link(mm, vma, prev, rb_link, rb_parent); in do_mmap_pgoff()
591 mm->total_vm += len >> PAGE_SHIFT; in do_mmap_pgoff()
593 mm->locked_vm += len >> PAGE_SHIFT; in do_mmap_pgoff()
605 zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start); in do_mmap_pgoff()
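
The do_mmap_pgoff() hits show the bookkeeping around a new mapping: a map_count ceiling (427), vm_flags built from the caller's prot/flags plus mm->def_flags (441), a locked_vm check for VM_LOCKED mappings (445), removal of any old mapping at the target address (495), an address-space size check against total_vm (501), an attempt to merge with a neighbour before allocating (513), and finally total_vm/locked_vm charged in pages (591-593), with the error path zapping whatever was mapped (605). A small sketch of just the page-accounting arithmetic; the limit and field names are assumptions:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed: 4 KiB pages */

    struct mm_acct {
        unsigned long total_vm;    /* pages mapped */
        unsigned long locked_vm;   /* pages locked in RAM */
    };

    /* Charge a new mapping of 'len' bytes, refusing it if it would push the
     * address space past 'as_limit' bytes, as the total_vm check above does. */
    static int charge_mapping(struct mm_acct *mm, unsigned long len,
                              unsigned long as_limit, int is_locked)
    {
        if ((mm->total_vm << PAGE_SHIFT) + len > as_limit)
            return -1;                         /* -ENOMEM in the kernel */
        mm->total_vm += len >> PAGE_SHIFT;
        if (is_locked)
            mm->locked_vm += len >> PAGE_SHIFT;
        return 0;
    }

    int main(void)
    {
        struct mm_acct mm = { 0, 0 };
        if (charge_mapping(&mm, 64 * 1024, 1UL << 30, 0) == 0)
            printf("total_vm now %lu pages\n", mm.total_vm);
        return 0;
    }
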
632 vma = find_vma(current->mm, addr); in arch_get_unmapped_area()
639 for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { in arch_get_unmapped_area()
681 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr) in find_vma() argument
685 if (mm) { in find_vma()
688 vma = mm->mmap_cache; in find_vma()
692 rb_node = mm->mm_rb.rb_node; in find_vma()
709 mm->mmap_cache = vma; in find_vma()
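
find_vma() returns the first VMA whose vm_end lies above addr, and lines 685-709 show the two-step lookup: try the per-mm mmap_cache first, and only on a miss walk mm_rb, caching the result for the next call. A user-space sketch of the same "check cache, else search, then refresh cache" shape, searching the linked list instead of the rb-tree for brevity:

    #include <stddef.h>

    struct vma_model {
        unsigned long vm_start, vm_end;
        struct vma_model *vm_next;       /* address-ordered list */
    };

    struct mm_model {
        struct vma_model *mmap;          /* list head */
        struct vma_model *mmap_cache;    /* last lookup result */
    };

    /* First VMA with vm_end > addr, or NULL; mirrors find_vma()'s caching,
     * but searches the list rather than mm_rb. */
    static struct vma_model *find_vma_model(struct mm_model *mm, unsigned long addr)
    {
        struct vma_model *vma = mm->mmap_cache;

        if (vma && vma->vm_end > addr && vma->vm_start <= addr)
            return vma;                  /* cache hit: addr falls inside it */

        for (vma = mm->mmap; vma; vma = vma->vm_next)
            if (vma->vm_end > addr)
                break;

        if (vma)
            mm->mmap_cache = vma;        /* remember it for the next lookup */
        return vma;
    }
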
716 struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, in find_vma_prev() argument
719 if (mm) { in find_vma_prev()
724 rb_node = mm->mm_rb.rb_node; in find_vma_prev()
753 if ((rb_prev ? (*pprev)->vm_next : mm->mmap) != vma) in find_vma_prev()
762 struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr) in find_extend_vma() argument
768 vma = find_vma(mm,addr); in find_extend_vma()
807 static struct vm_area_struct * unmap_fixup(struct mm_struct *mm, in unmap_fixup() argument
836 spin_lock(&mm->page_table_lock); in unmap_fixup()
842 spin_lock(&mm->page_table_lock); in unmap_fixup()
869 spin_lock(&mm->page_table_lock); in unmap_fixup()
870 __insert_vm_struct(mm, mpnt); in unmap_fixup()
873 __insert_vm_struct(mm, area); in unmap_fixup()
874 spin_unlock(&mm->page_table_lock); in unmap_fixup()
892 static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev, in free_pgtables() argument
900 prev = mm->mmap; in free_pgtables()
934 clear_page_tables(mm, start_index, end_index - start_index); in free_pgtables()
935 flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK); in free_pgtables()
944 int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len) in do_munmap() argument
959 mpnt = find_vma_prev(mm, addr, &prev); in do_munmap()
969 && mm->map_count >= max_map_count) in do_munmap()
980 npp = (prev ? &prev->vm_next : &mm->mmap); in do_munmap()
982 spin_lock(&mm->page_table_lock); in do_munmap()
987 rb_erase(&mpnt->vm_rb, &mm->mm_rb); in do_munmap()
989 mm->mmap_cache = NULL; /* Kill the cache. */ in do_munmap()
990 spin_unlock(&mm->page_table_lock); in do_munmap()
1015 mm->map_count--; in do_munmap()
1017 zap_page_range(mm, st, size); in do_munmap()
1022 extra = unmap_fixup(mm, mpnt, st, size, extra); in do_munmap()
1026 validate_mm(mm); in do_munmap()
1032 free_pgtables(mm, prev, addr, addr+len); in do_munmap()
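
do_munmap() (lines 944-1032) follows a detach-then-tear-down order: find the first affected VMA with find_vma_prev(), refuse if splitting a VMA in the middle would exceed max_map_count, unhook every overlapping VMA from the list and rb-tree under page_table_lock while killing mmap_cache, then for each detached VMA zap its pages and let unmap_fixup() re-insert any surviving head or tail piece, and finally free the now-empty page tables. A list-only sketch of the detach step, with placeholder types and no page-table work:

    #include <stddef.h>

    struct vma_model {
        unsigned long vm_start, vm_end;
        struct vma_model *vm_next;
    };

    struct mm_model {
        struct vma_model *mmap;
        struct vma_model *mmap_cache;
        int map_count;
    };

    /* Detach every VMA overlapping [addr, addr+len) and return the chain of
     * detached VMAs; callers would then zap pages and fix up partial overlaps,
     * as do_munmap() does. Assumes addr and len are page-aligned. */
    static struct vma_model *detach_range(struct mm_model *mm,
                                          unsigned long addr, unsigned long len)
    {
        struct vma_model **npp = &mm->mmap, *detached = NULL, *mpnt;
        unsigned long end = addr + len;

        while ((mpnt = *npp) != NULL && mpnt->vm_start < end) {
            if (mpnt->vm_end <= addr) {    /* entirely below the range */
                npp = &mpnt->vm_next;
                continue;
            }
            *npp = mpnt->vm_next;          /* unhook from the mm list */
            mpnt->vm_next = detached;      /* push onto the detached chain */
            detached = mpnt;
            mm->map_count--;
        }
        mm->mmap_cache = NULL;             /* kill the cache, as the kernel does */
        return detached;
    }
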
1040 struct mm_struct *mm = current->mm; in sys_munmap() local
1042 down_write(&mm->mmap_sem); in sys_munmap()
1043 ret = do_munmap(mm, addr, len); in sys_munmap()
1044 up_write(&mm->mmap_sem); in sys_munmap()
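
sys_munmap() itself is only a locking wrapper: take mmap_sem for writing, call do_munmap(), drop the semaphore, return the result. The same wrap-the-worker pattern, sketched with a POSIX rwlock standing in for the kernel's mmap_sem:

    #include <pthread.h>

    static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

    /* Placeholder for the real worker, which must run with the write lock held. */
    static int do_munmap_model(unsigned long addr, unsigned long len)
    {
        (void)addr; (void)len;
        return 0;
    }

    static int sys_munmap_model(unsigned long addr, unsigned long len)
    {
        int ret;

        pthread_rwlock_wrlock(&mmap_sem);   /* down_write(&mm->mmap_sem) */
        ret = do_munmap_model(addr, len);
        pthread_rwlock_unlock(&mmap_sem);   /* up_write(&mm->mmap_sem) */
        return ret;
    }
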
1049 static inline void verify_mmap_write_lock_held(struct mm_struct *mm) in verify_mmap_write_lock_held() argument
1051 if (down_read_trylock(&mm->mmap_sem)) { in verify_mmap_write_lock_held()
1053 up_read(&mm->mmap_sem); in verify_mmap_write_lock_held()
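
verify_mmap_write_lock_held() is a cheap assertion built from the semaphore itself: if a read lock can still be taken (down_read_trylock succeeds), then nobody holds the write lock, so the caller has broken the locking rule; the function drops the read lock it just took and complains. The same trick with a POSIX rwlock; the function name and message here are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

    /* A successful read-trylock proves no writer holds the lock, which is
     * exactly the bug this check exists to catch. */
    static void verify_write_lock_held(void)
    {
        if (pthread_rwlock_tryrdlock(&mmap_sem) == 0) {
            pthread_rwlock_unlock(&mmap_sem);
            fprintf(stderr, "caller did not hold the write lock\n");
        }
    }
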
1064 struct mm_struct * mm = current->mm; in do_brk() local
1082 if (mm->def_flags & VM_LOCKED) { in do_brk()
1083 unsigned long locked = mm->locked_vm << PAGE_SHIFT; in do_brk()
1093 verify_mmap_write_lock_held(mm); in do_brk()
1099 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); in do_brk()
1101 if (do_munmap(mm, addr, len)) in do_brk()
1107 if ((mm->total_vm << PAGE_SHIFT) + len in do_brk()
1111 if (mm->map_count > max_map_count) in do_brk()
1117 flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags; in do_brk()
1120 if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags)) in do_brk()
1130 vma->vm_mm = mm; in do_brk()
1140 vma_link(mm, vma, prev, rb_link, rb_parent); in do_brk()
1143 mm->total_vm += len >> PAGE_SHIFT; in do_brk()
1145 mm->locked_vm += len >> PAGE_SHIFT; in do_brk()
1152 void build_mmap_rb(struct mm_struct * mm) in build_mmap_rb() argument
1157 mm->mm_rb = RB_ROOT; in build_mmap_rb()
1158 rb_link = &mm->mm_rb.rb_node; in build_mmap_rb()
1160 for (vma = mm->mmap; vma; vma = vma->vm_next) { in build_mmap_rb()
1161 __vma_link_rb(mm, vma, rb_link, rb_parent); in build_mmap_rb()
1168 void exit_mmap(struct mm_struct * mm) in exit_mmap() argument
1172 release_segments(mm); in exit_mmap()
1173 spin_lock(&mm->page_table_lock); in exit_mmap()
1174 mpnt = mm->mmap; in exit_mmap()
1175 mm->mmap = mm->mmap_cache = NULL; in exit_mmap()
1176 mm->mm_rb = RB_ROOT; in exit_mmap()
1177 mm->rss = 0; in exit_mmap()
1178 spin_unlock(&mm->page_table_lock); in exit_mmap()
1179 mm->total_vm = 0; in exit_mmap()
1180 mm->locked_vm = 0; in exit_mmap()
1182 flush_cache_mm(mm); in exit_mmap()
1193 mm->map_count--; in exit_mmap()
1195 zap_page_range(mm, start, size); in exit_mmap()
1203 if (mm->map_count) in exit_mmap()
1206 clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD); in exit_mmap()
1208 flush_tlb_mm(mm); in exit_mmap()
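
exit_mmap() (lines 1168-1208) shows the teardown order when an address space dies: under page_table_lock the VMA list, mmap_cache, rb-tree root and rss are reset, total_vm and locked_vm are zeroed and the caches flushed, and only then is each detached VMA's page range zapped and map_count dropped; a non-zero map_count at the end is a bug, and clear_page_tables() frees every user page-table page. A condensed model of that "detach everything first, then tear each piece down" ordering, with placeholder types:

    #include <stddef.h>
    #include <stdlib.h>

    struct vma_model {
        unsigned long vm_start, vm_end;
        struct vma_model *vm_next;
    };

    struct mm_model {
        struct vma_model *mmap, *mmap_cache;
        int map_count;
        unsigned long total_vm, locked_vm;
    };

    static void exit_mmap_model(struct mm_model *mm)
    {
        struct vma_model *mpnt = mm->mmap;

        /* Step 1: detach everything from the mm in one go (done under
         * page_table_lock in the kernel). */
        mm->mmap = mm->mmap_cache = NULL;
        mm->total_vm = 0;
        mm->locked_vm = 0;

        /* Step 2: tear down each detached VMA in turn; the kernel zaps its
         * page range here before freeing it. */
        while (mpnt) {
            struct vma_model *next = mpnt->vm_next;
            mm->map_count--;
            free(mpnt);                    /* VMAs assumed malloc'd in this model */
            mpnt = next;
        }

        /* Step 3: anything left in map_count indicates corruption. */
        if (mm->map_count)
            abort();
    }
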
1215 void __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) in __insert_vm_struct() argument
1220 __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent); in __insert_vm_struct()
1223 __vma_link(mm, vma, prev, rb_link, rb_parent); in __insert_vm_struct()
1224 mm->map_count++; in __insert_vm_struct()
1225 validate_mm(mm); in __insert_vm_struct()
1228 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) in insert_vm_struct() argument
1233 __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent); in insert_vm_struct()
1236 vma_link(mm, vma, prev, rb_link, rb_parent); in insert_vm_struct()
1237 validate_mm(mm); in insert_vm_struct()
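
The last two entries are a locked/unlocked pair: insert_vm_struct() looks up the insertion point and calls vma_link(), which takes page_table_lock itself, while __insert_vm_struct() is for callers such as unmap_fixup() that already hold the lock and therefore links and bumps map_count directly. That double-underscore convention, "__foo() assumes the caller locked, foo() locks for you", recurs throughout this listing; a generic user-space sketch of the split:

    #include <pthread.h>

    static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
    static int map_count;

    /* __insert: the caller must already hold page_table_lock. */
    static void __insert_model(void)
    {
        map_count++;                        /* ... the real linking work goes here ... */
    }

    /* insert: takes the lock around the __ variant, as vma_link() does. */
    static void insert_model(void)
    {
        pthread_mutex_lock(&page_table_lock);
        __insert_model();
        pthread_mutex_unlock(&page_table_lock);
    }
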