/*
 *	linux/mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

extern int vm_enough_memory(long pages);

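/*
 * Walk the page tables of 'mm' and return the pte that maps 'addr',
 * or NULL if no entry is present.  Bad upper-level entries are
 * reported and cleared on the way down.
 */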
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		goto end;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto end;
	}

	pte = pte_offset(pmd, addr);
	if (pte_none(*pte))
		pte = NULL;
end:
	return pte;
}

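/*
 * Allocate any missing page-table levels needed to map 'addr' and
 * return a pointer to the (possibly new) pte, or NULL on allocation
 * failure.
 */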
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pmd_t * pmd;
	pte_t * pte = NULL;

	pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
	if (pmd)
		pte = pte_alloc(mm, pmd, addr);
	return pte;
}

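/*
 * Move the contents of the source pte to the destination pte.  If no
 * destination could be allocated, put the entry back where it came
 * from and report failure (nonzero return).
 */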
static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
{
	int error = 0;
	pte_t pte;

	if (!pte_none(*src)) {
		pte = ptep_get_and_clear(src);
		if (!dst) {
			/* No dest? We must put it back. */
			dst = src;
			error++;
		}
		set_pte(dst, pte);
	}
	return error;
}

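/*
 * Move a single page mapping from old_addr to new_addr under the
 * page_table_lock.  Returns nonzero if the destination pte could not
 * be set up.
 */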
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
	int error = 0;
	pte_t * src, * dst;

	spin_lock(&mm->page_table_lock);
	src = get_one_pte(mm, old_addr);
	if (src) {
		dst = alloc_one_pte(mm, new_addr);
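		/*
		 * Re-lookup the source pte: pte_alloc() may have dropped
		 * and retaken page_table_lock while allocating, so the
		 * old pointer can no longer be trusted.
		 */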
		src = get_one_pte(mm, old_addr);
		if (src)
			error = copy_one_pte(mm, src, dst);
	}
	spin_unlock(&mm->page_table_lock);
	return error;
}

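/*
 * Move 'len' bytes worth of page mappings from old_addr to new_addr,
 * one page at a time, backing the pages out again if any single move
 * fails.
 */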
static int move_page_tables(struct mm_struct * mm,
	unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
	unsigned long offset = len;

	flush_cache_range(mm, old_addr, old_addr + len);

	/*
	 * This is not the clever way to do this, but we're taking the
	 * easy way out on the assumption that most remappings will be
	 * only a few pages.. This also makes error recovery easier.
	 */
	while (offset) {
		offset -= PAGE_SIZE;
		if (move_one_page(mm, old_addr + offset, new_addr + offset))
			goto oops_we_failed;
	}
	flush_tlb_range(mm, old_addr, old_addr + len);
	return 0;

	/*
	 * Ok, the move failed because we didn't have enough pages for
	 * the new page table tree. This is unlikely, but we have to
	 * take the possibility into account. In that case we just move
	 * all the pages back (this will work, because we still have
	 * the old page tables)
	 */
oops_we_failed:
	flush_cache_range(mm, new_addr, new_addr + len);
	while ((offset += PAGE_SIZE) < len)
		move_one_page(mm, new_addr + offset, old_addr + offset);
	zap_page_range(mm, new_addr, len);
	return -1;
}

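/*
 * Move a mapping to a new address: try to reuse a neighbouring vma at
 * the destination, otherwise allocate a fresh one, move the page
 * tables across and unmap the old range.  Returns the new address on
 * success, -ENOMEM on failure.
 */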
static inline unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len,
	unsigned long new_addr)
{
	struct mm_struct * mm = vma->vm_mm;
	struct vm_area_struct * new_vma, * next, * prev;
	int allocated_vma;

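	/*
	 * See whether the new range can simply be glued onto an existing
	 * vma: extend 'prev' forward over it, extend 'next' backward over
	 * it, and absorb 'next' into 'prev' if the two now abut.
	 */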
	new_vma = NULL;
	next = find_vma_prev(mm, new_addr, &prev);
	if (next) {
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
			if (next != prev->vm_next)
				BUG();
			if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
				spin_lock(&mm->page_table_lock);
				prev->vm_end = next->vm_end;
				__vma_unlink(mm, next, prev);
				spin_unlock(&mm->page_table_lock);

				mm->map_count--;
				kmem_cache_free(vm_area_cachep, next);
			}
		} else if (next->vm_start == new_addr + new_len &&
			   can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			next->vm_start = new_addr;
			spin_unlock(&mm->page_table_lock);
			new_vma = next;
		}
	} else {
		prev = find_vma(mm, new_addr-1);
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
		}
	}

	allocated_vma = 0;
	if (!new_vma) {
		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!new_vma)
			goto out;
		allocated_vma = 1;
	}

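	/*
	 * The actual move: shift the page tables and, on success, finish
	 * setting up the destination vma and unmap the old range.
	 */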
	if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
		unsigned long vm_locked = vma->vm_flags & VM_LOCKED;

		if (allocated_vma) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT;
			new_vma->vm_raend = 0;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
		}

		/* XXX: possible errors masked, mapping might remain */
		do_munmap(current->mm, addr, old_len);

		current->mm->total_vm += new_len >> PAGE_SHIFT;
		if (vm_locked) {
			current->mm->locked_vm += new_len >> PAGE_SHIFT;
			if (new_len > old_len)
				make_pages_present(new_addr + old_len,
						   new_addr + new_len);
		}
		return new_addr;
	}
	if (allocated_vma)
		kmem_cache_free(vm_area_cachep, new_vma);
out:
	return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
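/*
 * Illustrative userspace usage (a sketch only, assuming 4096-byte pages):
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
 *
 * grows the one-page anonymous mapping to two pages, relocating it if
 * it cannot be expanded in place.
 */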
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	if (old_len > TASK_SIZE || addr > TASK_SIZE - old_len)
		goto out;

	if (addr >= TASK_SIZE)
		goto out;

	/* new_addr is only valid if MREMAP_FIXED is specified */
	if (flags & MREMAP_FIXED) {
		if (new_addr & ~PAGE_MASK)
			goto out;
		if (!(flags & MREMAP_MAYMOVE))
			goto out;

		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
			goto out;

		if (new_addr >= TASK_SIZE)
			goto out;

		/*
		 * Allow new_len == 0 only if new_addr == addr
		 * to preserve truncation in place (that was working
		 * safe and some app may depend on it).
		 */
		if (unlikely(!new_len && new_addr != addr))
			goto out;

		/* Check if the location we're moving into overlaps the
		 * old location at all, and fail if it does.
		 */
		if ((new_addr <= addr) && (new_addr+new_len) > addr)
			goto out;

		if ((addr <= new_addr) && (addr+old_len) > new_addr)
			goto out;

		/* Ensure a non-privileged process is not trying to map
		 * lower pages.
		 */
		if (new_addr < mmap_min_addr && !capable(CAP_SYS_RAWIO))
			return -EPERM;

		ret = do_munmap(current->mm, new_addr, new_len);
		if (ret && new_len)
			goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 */
	if (old_len >= new_len) {
		ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			goto out;
	}

	/*
	 * Ok, we need to grow.. or relocate.
	 */
	ret = -EFAULT;
	vma = find_vma(current->mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto out;
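	/*
	 * VM_DONTEXPAND mappings (typically set up by drivers) may shrink
	 * or move, but must never be grown by mremap().
	 */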
	if (vma->vm_flags & VM_DONTEXPAND) {
		if (new_len > old_len)
			goto out;
	}
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
		locked += new_len - old_len;
		ret = -EAGAIN;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			goto out;
	}
	ret = -ENOMEM;
	if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
	    > current->rlim[RLIMIT_AS].rlim_cur)
		goto out;
	/* Private writable mapping? Check memory availability.. */
	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
	    !(flags & MAP_NORESERVE) &&
	    !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
		goto out;

	/* old_len exactly to the end of the area..
	 * And we're not relocating the area.
	 */
	if (old_len == vma->vm_end - addr &&
	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
		unsigned long max_addr = TASK_SIZE;
		if (vma->vm_next)
			max_addr = vma->vm_next->vm_start;
		/* can we just expand the current mapping? */
		if (max_addr - addr >= new_len) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;
			spin_lock(&vma->vm_mm->page_table_lock);
			vma->vm_end = addr + new_len;
			spin_unlock(&vma->vm_mm->page_table_lock);
			current->mm->total_vm += pages;
			if (vma->vm_flags & VM_LOCKED) {
				current->mm->locked_vm += pages;
				make_pages_present(addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		if (!(flags & MREMAP_FIXED)) {
			unsigned long map_flags = 0;
			if (vma->vm_flags & VM_SHARED)
				map_flags |= MAP_SHARED;

			new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff, map_flags);
			ret = new_addr;
			if (new_addr & ~PAGE_MASK)
				goto out;
		}
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	return ret;
}

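/*
 * System call entry point: take the mm semaphore for writing and let
 * do_mremap() do the real work.
 */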
asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}