Lines matching refs: mm (identifier cross-reference; the functions listed are those of 2.4-era mm/mremap.c)

18 static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr) in get_one_pte() argument
24 pgd = pgd_offset(mm, addr); in get_one_pte()
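
Both references above sit in get_one_pte(): mm is the argument (line 18) and is handed to pgd_offset() (line 24) to start the page-table walk. A reconstructed sketch of the whole walk, assuming the 2.4-era three-level pgd/pmd/pte layout; only the listed lines are pinned by this listing, the rest follows kernels of that vintage:

    static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd;
            pmd_t *pmd;
            pte_t *pte = NULL;

            pgd = pgd_offset(mm, addr);     /* line 24: top-level lookup keyed by mm */
            if (pgd_none(*pgd))
                    goto end;
            if (pgd_bad(*pgd)) {            /* assumed: usual bad-entry handling */
                    pgd_ERROR(*pgd);
                    pgd_clear(pgd);
                    goto end;
            }
            pmd = pmd_offset(pgd, addr);
            if (pmd_none(*pmd))
                    goto end;
            if (pmd_bad(*pmd)) {
                    pmd_ERROR(*pmd);
                    pmd_clear(pmd);
                    goto end;
            }
            pte = pte_offset(pmd, addr);
            if (pte_none(*pte))
                    pte = NULL;             /* empty pte: nothing to move */
    end:
            return pte;
    }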
49 static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr) in alloc_one_pte() argument
54 pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr); in alloc_one_pte()
56 pte = pte_alloc(mm, pmd, addr); in alloc_one_pte()
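
alloc_one_pte() is the allocating counterpart: both pmd_alloc() and pte_alloc() take mm so any new page-table page is charged to the right address space. A sketch in which only the two listed calls are certain and the glue is assumed:

    static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
    {
            pmd_t *pmd;
            pte_t *pte = NULL;

            /* line 54: find or allocate the pmd covering addr under this mm */
            pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
            if (pmd)
                    /* line 56: find or allocate the pte within that pmd */
                    pte = pte_alloc(mm, pmd, addr);
            return pte;
    }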
60 static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst) in copy_one_pte() argument
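
copy_one_pte() transfers a single entry from src to dst. A sketch of the 2.4-era body (assumed apart from the signature on line 60): the entry is atomically taken from the source and, if no destination pte could be allocated, put back so the page is never lost:

    static inline int copy_one_pte(struct mm_struct *mm, pte_t *src, pte_t *dst)
    {
            int error = 0;
            pte_t pte;

            if (!pte_none(*src)) {
                    pte = ptep_get_and_clear(src);  /* take the entry atomically */
                    if (!dst) {
                            dst = src;              /* no destination: restore in place */
                            error++;
                    }
                    set_pte(dst, pte);
            }
            return error;
    }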
77 static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr) in move_one_page() argument
82 spin_lock(&mm->page_table_lock); in move_one_page()
83 src = get_one_pte(mm, old_addr); in move_one_page()
85 dst = alloc_one_pte(mm, new_addr); in move_one_page()
86 src = get_one_pte(mm, old_addr); in move_one_page()
88 error = copy_one_pte(mm, src, dst); in move_one_page()
90 spin_unlock(&mm->page_table_lock); in move_one_page()
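
move_one_page() strings the three helpers together under mm->page_table_lock. Note the second get_one_pte() on line 86: in kernels of this vintage pte_alloc() could drop and retake page_table_lock while allocating a new page-table page, so the source pte pointer must be re-fetched after alloc_one_pte(). A sketch assembled from the listed lines; the if-nesting is an assumption:

    static int move_one_page(struct mm_struct *mm,
                             unsigned long old_addr, unsigned long new_addr)
    {
            int error = 0;
            pte_t *src, *dst;

            spin_lock(&mm->page_table_lock);                /* line 82 */
            src = get_one_pte(mm, old_addr);                /* line 83 */
            if (src) {
                    dst = alloc_one_pte(mm, new_addr);      /* line 85 */
                    src = get_one_pte(mm, old_addr);        /* line 86: re-fetch */
                    if (src)
                            error = copy_one_pte(mm, src, dst);     /* line 88 */
            }
            spin_unlock(&mm->page_table_lock);              /* line 90 */
            return error;
    }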
94 static int move_page_tables(struct mm_struct * mm, in move_page_tables() argument
99 flush_cache_range(mm, old_addr, old_addr + len); in move_page_tables()
108 if (move_one_page(mm, old_addr + offset, new_addr + offset)) in move_page_tables()
111 flush_tlb_range(mm, old_addr, old_addr + len); in move_page_tables()
122 flush_cache_range(mm, new_addr, new_addr + len); in move_page_tables()
124 move_one_page(mm, new_addr + offset, old_addr + offset); in move_page_tables()
125 zap_page_range(mm, new_addr, len); in move_page_tables()
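
move_page_tables() moves the range one page at a time; the references from line 122 on are its error path. If any move_one_page() fails (typically out of memory for a new page table), every page moved so far is moved back and the half-built destination range is zapped. A sketch around the listed calls; the loop structure is assumed:

    static int move_page_tables(struct mm_struct *mm,
            unsigned long new_addr, unsigned long old_addr, unsigned long len)
    {
            unsigned long offset = len;

            flush_cache_range(mm, old_addr, old_addr + len);        /* line 99 */

            /* Simple page-at-a-time copy; most remaps are only a few pages. */
            while (offset) {
                    offset -= PAGE_SIZE;
                    if (move_one_page(mm, old_addr + offset, new_addr + offset))
                            goto oops_we_failed;                    /* line 108 */
            }
            flush_tlb_range(mm, old_addr, old_addr + len);          /* line 111 */
            return 0;

    oops_we_failed:
            /* Undo: move everything back, then drop the partial copy. */
            flush_cache_range(mm, new_addr, new_addr + len);        /* line 122 */
            while ((offset += PAGE_SIZE) < len)
                    move_one_page(mm, new_addr + offset, old_addr + offset); /* line 124 */
            zap_page_range(mm, new_addr, len);                      /* line 125 */
            return -1;
    }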
133 struct mm_struct * mm = vma->vm_mm; in move_vma() local
138 next = find_vma_prev(mm, new_addr, &prev); in move_vma()
142 spin_lock(&mm->page_table_lock); in move_vma()
144 spin_unlock(&mm->page_table_lock); in move_vma()
149 spin_lock(&mm->page_table_lock); in move_vma()
151 __vma_unlink(mm, next, prev); in move_vma()
152 spin_unlock(&mm->page_table_lock); in move_vma()
154 mm->map_count--; in move_vma()
159 spin_lock(&mm->page_table_lock); in move_vma()
161 spin_unlock(&mm->page_table_lock); in move_vma()
165 prev = find_vma(mm, new_addr-1); in move_vma()
168 spin_lock(&mm->page_table_lock); in move_vma()
170 spin_unlock(&mm->page_table_lock); in move_vma()
183 if (!move_page_tables(current->mm, new_addr, addr, old_len)) { in move_vma()
196 insert_vm_struct(current->mm, new_vma); in move_vma()
200 do_munmap(current->mm, addr, old_len); in move_vma()
202 current->mm->total_vm += new_len >> PAGE_SHIFT; in move_vma()
204 current->mm->locked_vm += new_len >> PAGE_SHIFT; in move_vma()
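
move_vma()'s references fall into three groups: merging the moved mapping into a neighbouring vma at new_addr (lines 138-170, each vma-list edit taken under mm->page_table_lock), the move itself (line 183), and bookkeeping (lines 196-204: publishing the new vma, unmapping the old range, updating total_vm/locked_vm). A condensed sketch; the can_vma_merge() predicate, the allocated-vma handling, and the error unwinding are assumptions based on kernels of this era, and the symmetric merge against next (lines 159-170) is elided:

    static inline unsigned long move_vma(struct vm_area_struct *vma,
            unsigned long addr, unsigned long old_len, unsigned long new_len,
            unsigned long new_addr)
    {
            struct mm_struct *mm = vma->vm_mm;              /* line 133 */
            struct vm_area_struct *new_vma = NULL, *next, *prev;

            /* Try to extend a neighbouring vma instead of allocating one. */
            next = find_vma_prev(mm, new_addr, &prev);      /* line 138 */
            if (prev && prev->vm_end == new_addr &&
                can_vma_merge(prev, vma->vm_flags)) {       /* assumed predicate */
                    spin_lock(&mm->page_table_lock);        /* line 142 */
                    prev->vm_end = new_addr + new_len;
                    spin_unlock(&mm->page_table_lock);      /* line 144 */
                    new_vma = prev;
                    if (next && prev->vm_end == next->vm_start &&
                        can_vma_merge(next, prev->vm_flags)) {
                            /* Grown vma now abuts next: absorb it too. */
                            spin_lock(&mm->page_table_lock);        /* line 149 */
                            prev->vm_end = next->vm_end;
                            __vma_unlink(mm, next, prev);           /* line 151 */
                            spin_unlock(&mm->page_table_lock);      /* line 152 */
                            mm->map_count--;                        /* line 154 */
                            kmem_cache_free(vm_area_cachep, next);
                    }
            }
            /* (Merge attempts against next and via find_vma(mm, new_addr-1),
               lines 159-170, omitted here.) */

            if (!new_vma)
                    new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
            if (!new_vma)
                    return -ENOMEM;

            if (!move_page_tables(current->mm, new_addr, addr, old_len)) { /* line 183 */
                    /* Fill in new_vma (start/end/pgoff) and publish it. */
                    insert_vm_struct(current->mm, new_vma);         /* line 196 */
                    do_munmap(current->mm, addr, old_len);          /* line 200 */
                    current->mm->total_vm += new_len >> PAGE_SHIFT; /* line 202 */
                    if (vma->vm_flags & VM_LOCKED)
                            current->mm->locked_vm += new_len >> PAGE_SHIFT; /* line 204 */
                    return new_addr;
            }
            return -ENOMEM;
    }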
282 ret = do_munmap(current->mm, new_addr, new_len); in do_mremap()
292 ret = do_munmap(current->mm, addr+new_len, old_len - new_len); in do_mremap()
304 vma = find_vma(current->mm, addr); in do_mremap()
315 unsigned long locked = current->mm->locked_vm << PAGE_SHIFT; in do_mremap()
322 if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len) in do_mremap()
346 current->mm->total_vm += pages; in do_mremap()
348 current->mm->locked_vm += pages; in do_mremap()
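
do_mremap() is where these references become policy: MREMAP_FIXED unmaps the destination first (line 282), shrinking is just a partial unmap of the tail (line 292), and growing is checked against the vma found at line 304 plus the RLIMIT_MEMLOCK (line 315) and address-space (line 322) limits before the counters are bumped (lines 346-348). A sketch of those checks only; the listing pins the quoted expressions, the surrounding control flow and the rlim accesses are assumptions:

            /* MREMAP_FIXED: clear the target range first. */
            ret = do_munmap(current->mm, new_addr, new_len);        /* line 282 */
            ...
            /* Shrinking: unmap the tail and we are done. */
            ret = do_munmap(current->mm, addr + new_len,
                            old_len - new_len);                     /* line 292 */
            ...
            vma = find_vma(current->mm, addr);                      /* line 304 */
            if (vma->vm_flags & VM_LOCKED) {
                    unsigned long locked =
                            current->mm->locked_vm << PAGE_SHIFT;   /* line 315 */
                    locked += new_len - old_len;
                    if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur) /* assumed */
                            goto out;
            }
            if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len) /* line 322 */
                > current->rlim[RLIMIT_AS].rlim_cur)                /* assumed */
                    goto out;
            ...
            current->mm->total_vm += pages;                         /* line 346 */
            if (vma->vm_flags & VM_LOCKED)
                    current->mm->locked_vm += pages;                /* line 348 */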
385 down_write(&current->mm->mmap_sem); in sys_mremap()
387 up_write(&current->mm->mmap_sem); in sys_mremap()
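
sys_mremap() itself is just the locking wrapper: the whole operation runs with mmap_sem held for writing. A sketch; the body between the two listed lines is assumed:

    asmlinkage unsigned long sys_mremap(unsigned long addr,
            unsigned long old_len, unsigned long new_len,
            unsigned long flags, unsigned long new_addr)
    {
            unsigned long ret;

            down_write(&current->mm->mmap_sem);     /* line 385 */
            ret = do_mremap(addr, old_len, new_len, flags, new_addr);
            up_write(&current->mm->mmap_sem);       /* line 387 */
            return ret;
    }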