1 /*
2 * linux/mm/mmap.c
3 *
4 * Written by obz.
5 */
6 #include <linux/slab.h>
7 #include <linux/shm.h>
8 #include <linux/mman.h>
9 #include <linux/pagemap.h>
10 #include <linux/swap.h>
11 #include <linux/swapctl.h>
12 #include <linux/smp_lock.h>
13 #include <linux/init.h>
14 #include <linux/file.h>
15 #include <linux/fs.h>
16 #include <linux/personality.h>
17 #include <linux/mount.h>
18
19 #include <asm/uaccess.h>
20 #include <asm/pgalloc.h>
21
22 /*
23 * WARNING: the debugging will use recursive algorithms so never enable this
24 * unless you know what you are doing.
25 */
26 #undef DEBUG_MM_RB
27
28 /* description of effects of mapping type and prot in current implementation.
29 * this is due to the limited x86 page protection hardware. The expected
30 * behavior is in parens:
31 *
32 * map_type prot
33 * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
34 * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
35 * w: (no) no w: (no) no w: (yes) yes w: (no) no
36 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
37 *
38 * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
39 * w: (no) no w: (no) no w: (copy) copy w: (no) no
40 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
41 *
42 */
43 pgprot_t protection_map[16] = {
44 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
45 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
46 };
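/*
 * The table is indexed by the low four vm_flags bits
 * (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED), which is why the code
 * below sets
 *
 *	vma->vm_page_prot = protection_map[vm_flags & 0x0f];
 *
 * The first eight entries are the private (__P) protections, the last
 * eight the shared (__S) ones.
 */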
47
48 int sysctl_overcommit_memory;
49 unsigned long mmap_min_addr; /* defaults to 0 = no protection */
50
51 int max_map_count = DEFAULT_MAX_MAP_COUNT;
52
53 /* Check that a process has enough memory to allocate a
54 * new virtual mapping.
55 */
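/*
 * Returns non-zero when the request looks safe. The request is given
 * in pages: do_mmap_pgoff() and do_brk() below, for instance, fail
 * with -ENOMEM when vm_enough_memory(len >> PAGE_SHIFT) returns 0.
 */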
56 int vm_enough_memory(long pages)
57 {
58 /* Stupid algorithm to decide if we have enough memory: while
59 * simple, it hopefully works in most obvious cases.. Easy to
60 * fool it, but this should catch most mistakes.
61 */
62 /* 23/11/98 NJC: Somewhat less stupid version of algorithm,
63 * which tries to do "TheRightThing". Instead of using half of
64 * (buffers+cache), use the minimum values. Allow an extra 2%
65 * of num_physpages for safety margin.
66 */
67
68 unsigned long free;
69
70 /* Sometimes we want to use more memory than we have. */
71 if (sysctl_overcommit_memory)
72 return 1;
73
74 /* The page cache contains buffer pages these days.. */
75 free = page_cache_size;
76 free += nr_free_pages();
77 free += nr_swap_pages;
78
79 /*
80 * This double-counts: the nrpages are both in the page-cache
81 * and in the swapper space. At the same time, this compensates
82 * for the swap-space over-allocation (ie "nr_swap_pages" being
83 * too small).
84 */
85 free += swapper_space.nrpages;
86
87 /*
88 * The code below doesn't account for free space in the inode
89 * and dentry slab cache, slab cache fragmentation, inodes and
90 * dentries which will become freeable under VM load, etc.
91 * Lets just hope all these (complex) factors balance out...
92 */
93 free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT;
94 free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT;
95
96 return free > pages;
97 }
98
99 /* Remove one vm structure from the inode's i_mapping address space. */
100 static inline void __remove_shared_vm_struct(struct vm_area_struct *vma)
101 {
102 struct file * file = vma->vm_file;
103
104 if (file) {
105 struct inode *inode = file->f_dentry->d_inode;
106 if (vma->vm_flags & VM_DENYWRITE)
107 atomic_inc(&inode->i_writecount);
108 if(vma->vm_next_share)
109 vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
110 *vma->vm_pprev_share = vma->vm_next_share;
111 }
112 }
113
114 static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
115 {
116 lock_vma_mappings(vma);
117 __remove_shared_vm_struct(vma);
118 unlock_vma_mappings(vma);
119 }
120
121 void lock_vma_mappings(struct vm_area_struct *vma)
122 {
123 struct address_space *mapping;
124
125 mapping = NULL;
126 if (vma->vm_file)
127 mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
128 if (mapping)
129 spin_lock(&mapping->i_shared_lock);
130 }
131
132 void unlock_vma_mappings(struct vm_area_struct *vma)
133 {
134 struct address_space *mapping;
135
136 mapping = NULL;
137 if (vma->vm_file)
138 mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
139 if (mapping)
140 spin_unlock(&mapping->i_shared_lock);
141 }
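/*
 * Lock ordering note: where both locks are needed, the i_shared_lock
 * taken by lock_vma_mappings() is acquired before mm->page_table_lock
 * and released after it, as in vma_link() and unmap_fixup() below.
 */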
142
143 /*
144 * sys_brk() for the most part doesn't need the global kernel
145 * lock, except when an application is doing something nasty
146 * like trying to un-brk an area that has already been mapped
147 * to a regular file. in this case, the unmapping will need
148 * to invoke file system routines that need the global lock.
149 */
150 asmlinkage unsigned long sys_brk(unsigned long brk)
151 {
152 unsigned long rlim, retval;
153 unsigned long newbrk, oldbrk;
154 struct mm_struct *mm = current->mm;
155
156 down_write(&mm->mmap_sem);
157
158 if (brk < mm->end_code)
159 goto out;
160 newbrk = PAGE_ALIGN(brk);
161 oldbrk = PAGE_ALIGN(mm->brk);
162 if (oldbrk == newbrk)
163 goto set_brk;
164
165 /* Always allow shrinking brk. */
166 if (brk <= mm->brk) {
167 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
168 goto set_brk;
169 goto out;
170 }
171
172 /* Check against rlimit.. */
173 rlim = current->rlim[RLIMIT_DATA].rlim_cur;
174 if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
175 goto out;
176
177 /* Check against existing mmap mappings. */
178 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
179 goto out;
180
181 /* Check if we have enough memory.. */
182 if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
183 goto out;
184
185 /* Ok, looks good - let it rip. */
186 if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
187 goto out;
188 set_brk:
189 mm->brk = brk;
190 out:
191 retval = mm->brk;
192 up_write(&mm->mmap_sem);
193 return retval;
194 }
195
196 /* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
197 * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
198 * into "VM_xxx".
199 */
200 static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flags)
201 {
202 #define _trans(x,bit1,bit2) \
203 ((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
204
205 unsigned long prot_bits, flag_bits;
206 prot_bits =
207 _trans(prot, PROT_READ, VM_READ) |
208 _trans(prot, PROT_WRITE, VM_WRITE) |
209 _trans(prot, PROT_EXEC, VM_EXEC);
210 flag_bits =
211 _trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
212 _trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
213 _trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
214 return prot_bits | flag_bits;
215 #undef _trans
216 }
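/*
 * For example, a PROT_READ|PROT_WRITE request comes out as
 * VM_READ|VM_WRITE here; do_mmap_pgoff() then ORs in mm->def_flags
 * and the VM_MAY* bits before applying the MAP_SHARED/MAP_PRIVATE rules.
 */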
217
218 #ifdef DEBUG_MM_RB
219 static int browse_rb(rb_node_t * rb_node) {
220 int i = 0;
221 if (rb_node) {
222 i++;
223 i += browse_rb(rb_node->rb_left);
224 i += browse_rb(rb_node->rb_right);
225 }
226 return i;
227 }
228
229 static void validate_mm(struct mm_struct * mm) {
230 int bug = 0;
231 int i = 0;
232 struct vm_area_struct * tmp = mm->mmap;
233 while (tmp) {
234 tmp = tmp->vm_next;
235 i++;
236 }
237 if (i != mm->map_count)
238 printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
239 i = browse_rb(mm->mm_rb.rb_node);
240 if (i != mm->map_count)
241 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
242 if (bug)
243 BUG();
244 }
245 #else
246 #define validate_mm(mm) do { } while (0)
247 #endif
248
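/*
 * Like find_vma(), but also hand back what is needed to insert a new
 * vma at "addr": *pprev gets the vma preceding addr (or NULL), and
 * *rb_link/*rb_parent get the rbtree slot a new node would occupy.
 * Returns the first vma with vm_end > addr; if that vma already
 * contains addr it is returned at once and the hints are left untouched.
 */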
249 static struct vm_area_struct * find_vma_prepare(struct mm_struct * mm, unsigned long addr,
250 struct vm_area_struct ** pprev,
251 rb_node_t *** rb_link, rb_node_t ** rb_parent)
252 {
253 struct vm_area_struct * vma;
254 rb_node_t ** __rb_link, * __rb_parent, * rb_prev;
255
256 __rb_link = &mm->mm_rb.rb_node;
257 rb_prev = __rb_parent = NULL;
258 vma = NULL;
259
260 while (*__rb_link) {
261 struct vm_area_struct *vma_tmp;
262
263 __rb_parent = *__rb_link;
264 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
265
266 if (vma_tmp->vm_end > addr) {
267 vma = vma_tmp;
268 if (vma_tmp->vm_start <= addr)
269 return vma;
270 __rb_link = &__rb_parent->rb_left;
271 } else {
272 rb_prev = __rb_parent;
273 __rb_link = &__rb_parent->rb_right;
274 }
275 }
276
277 *pprev = NULL;
278 if (rb_prev)
279 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
280 *rb_link = __rb_link;
281 *rb_parent = __rb_parent;
282 return vma;
283 }
284
285 static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
286 rb_node_t * rb_parent)
287 {
288 if (prev) {
289 vma->vm_next = prev->vm_next;
290 prev->vm_next = vma;
291 } else {
292 mm->mmap = vma;
293 if (rb_parent)
294 vma->vm_next = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
295 else
296 vma->vm_next = NULL;
297 }
298 }
299
300 static inline void __vma_link_rb(struct mm_struct * mm, struct vm_area_struct * vma,
301 rb_node_t ** rb_link, rb_node_t * rb_parent)
302 {
303 rb_link_node(&vma->vm_rb, rb_parent, rb_link);
304 rb_insert_color(&vma->vm_rb, &mm->mm_rb);
305 }
306
307 static inline void __vma_link_file(struct vm_area_struct * vma)
308 {
309 struct file * file;
310
311 file = vma->vm_file;
312 if (file) {
313 struct inode * inode = file->f_dentry->d_inode;
314 struct address_space *mapping = inode->i_mapping;
315 struct vm_area_struct **head;
316
317 if (vma->vm_flags & VM_DENYWRITE)
318 atomic_dec(&inode->i_writecount);
319
320 head = &mapping->i_mmap;
321 if (vma->vm_flags & VM_SHARED)
322 head = &mapping->i_mmap_shared;
323
324 /* insert vma into inode's share list */
325 if((vma->vm_next_share = *head) != NULL)
326 (*head)->vm_pprev_share = &vma->vm_next_share;
327 *head = vma;
328 vma->vm_pprev_share = head;
329 }
330 }
331
332 static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
333 rb_node_t ** rb_link, rb_node_t * rb_parent)
334 {
335 __vma_link_list(mm, vma, prev, rb_parent);
336 __vma_link_rb(mm, vma, rb_link, rb_parent);
337 __vma_link_file(vma);
338 }
339
340 static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
341 rb_node_t ** rb_link, rb_node_t * rb_parent)
342 {
343 lock_vma_mappings(vma);
344 spin_lock(&mm->page_table_lock);
345 __vma_link(mm, vma, prev, rb_link, rb_parent);
346 spin_unlock(&mm->page_table_lock);
347 unlock_vma_mappings(vma);
348
349 mm->map_count++;
350 validate_mm(mm);
351 }
352
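/*
 * Try to merge the new range [addr, end) into a neighbouring vma.
 * Three cases are handled: extending prev forward, bridging prev and
 * prev->vm_next into a single vma (freeing the second one), or
 * extending the following vma backwards. Returns 1 if a merge was
 * done, 0 otherwise; callers only use this for anonymous mappings.
 */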
353 static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
354 rb_node_t * rb_parent, unsigned long addr, unsigned long end, unsigned long vm_flags)
355 {
356 spinlock_t * lock = &mm->page_table_lock;
357 if (!prev) {
358 prev = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
359 goto merge_next;
360 }
361 if (prev->vm_end == addr && can_vma_merge(prev, vm_flags)) {
362 struct vm_area_struct * next;
363
364 spin_lock(lock);
365 prev->vm_end = end;
366 next = prev->vm_next;
367 if (next && prev->vm_end == next->vm_start && can_vma_merge(next, vm_flags)) {
368 prev->vm_end = next->vm_end;
369 __vma_unlink(mm, next, prev);
370 spin_unlock(lock);
371
372 mm->map_count--;
373 kmem_cache_free(vm_area_cachep, next);
374 return 1;
375 }
376 spin_unlock(lock);
377 return 1;
378 }
379
380 prev = prev->vm_next;
381 if (prev) {
382 merge_next:
383 if (!can_vma_merge(prev, vm_flags))
384 return 0;
385 if (end == prev->vm_start) {
386 spin_lock(lock);
387 prev->vm_start = addr;
388 spin_unlock(lock);
389 return 1;
390 }
391 }
392
393 return 0;
394 }
395
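/*
 * The bulk of mmap(): map "len" bytes at (or near) "addr", backed by
 * "file" at page offset "pgoff", or anonymously when file is NULL.
 * Returns the start of the new mapping on success or a negative errno
 * on failure. Callers are expected to hold mm->mmap_sem for writing
 * (see verify_mmap_write_lock_held() below for the anonymous brk case).
 */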
396 unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len,
397 unsigned long prot, unsigned long flags, unsigned long pgoff)
398 {
399 struct mm_struct * mm = current->mm;
400 struct vm_area_struct * vma, * prev;
401 unsigned int vm_flags;
402 int correct_wcount = 0;
403 int error;
404 rb_node_t ** rb_link, * rb_parent;
405
406 if (file) {
407 if (!file->f_op || !file->f_op->mmap)
408 return -ENODEV;
409
410 if ((prot & PROT_EXEC) && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
411 return -EPERM;
412 }
413
414 if (!len)
415 return addr;
416
417 len = PAGE_ALIGN(len);
418
419 if (len > TASK_SIZE || len == 0)
420 return -EINVAL;
421
422 /* offset overflow? */
423 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
424 return -EINVAL;
425
426 /* Too many mappings? */
427 if (mm->map_count > max_map_count)
428 return -ENOMEM;
429
430 /* Obtain the address to map to. we verify (or select) it and ensure
431 * that it represents a valid section of the address space.
432 */
433 addr = get_unmapped_area(file, addr, len, pgoff, flags);
434 if (addr & ~PAGE_MASK)
435 return addr;
436
437 /* Do simple checking here so the lower-level routines won't have
438 * to. we assume access permissions have been handled by the open
439 * of the memory object, so we don't do any here.
440 */
441 vm_flags = calc_vm_flags(prot,flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
442
443 /* mlock MCL_FUTURE? */
444 if (vm_flags & VM_LOCKED) {
445 unsigned long locked = mm->locked_vm << PAGE_SHIFT;
446 locked += len;
447 if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
448 return -EAGAIN;
449 }
450
451 if (file) {
452 switch (flags & MAP_TYPE) {
453 case MAP_SHARED:
454 if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE))
455 return -EACCES;
456
457 /* Make sure we don't allow writing to an append-only file.. */
458 if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & FMODE_WRITE))
459 return -EACCES;
460
461 /* make sure there are no mandatory locks on the file. */
462 if (locks_verify_locked(file->f_dentry->d_inode))
463 return -EAGAIN;
464
465 vm_flags |= VM_SHARED | VM_MAYSHARE;
466 if (!(file->f_mode & FMODE_WRITE))
467 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
468
469 /* fall through */
470 case MAP_PRIVATE:
471 if (!(file->f_mode & FMODE_READ))
472 return -EACCES;
473 break;
474
475 default:
476 return -EINVAL;
477 }
478 } else {
479 vm_flags |= VM_SHARED | VM_MAYSHARE;
480 switch (flags & MAP_TYPE) {
481 default:
482 return -EINVAL;
483 case MAP_PRIVATE:
484 vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
485 /* fall through */
486 case MAP_SHARED:
487 break;
488 }
489 }
490
491 /* Clear old maps */
492 munmap_back:
493 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
494 if (vma && vma->vm_start < addr + len) {
495 if (do_munmap(mm, addr, len))
496 return -ENOMEM;
497 goto munmap_back;
498 }
499
500 /* Check against address space limit. */
501 if ((mm->total_vm << PAGE_SHIFT) + len
502 > current->rlim[RLIMIT_AS].rlim_cur)
503 return -ENOMEM;
504
505 /* Private writable mapping? Check memory availability.. */
506 if ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
507 !(flags & MAP_NORESERVE) &&
508 !vm_enough_memory(len >> PAGE_SHIFT))
509 return -ENOMEM;
510
511 /* Can we just expand an old anonymous mapping? */
512 if (!file && !(vm_flags & VM_SHARED) && rb_parent)
513 if (vma_merge(mm, prev, rb_parent, addr, addr + len, vm_flags))
514 goto out;
515
516 /* Determine the object being mapped and call the appropriate
517 * specific mapper. The address has already been validated, but not
518 * unmapped; the old maps, however, have been removed from the list.
519 */
520 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
521 if (!vma)
522 return -ENOMEM;
523
524 vma->vm_mm = mm;
525 vma->vm_start = addr;
526 vma->vm_end = addr + len;
527 vma->vm_flags = vm_flags;
528 vma->vm_page_prot = protection_map[vm_flags & 0x0f];
529 vma->vm_ops = NULL;
530 vma->vm_pgoff = pgoff;
531 vma->vm_file = NULL;
532 vma->vm_private_data = NULL;
533 vma->vm_raend = 0;
534
535 if (file) {
536 error = -EINVAL;
537 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
538 goto free_vma;
539 if (vm_flags & VM_DENYWRITE) {
540 error = deny_write_access(file);
541 if (error)
542 goto free_vma;
543 correct_wcount = 1;
544 }
545 vma->vm_file = file;
546 get_file(file);
547 error = file->f_op->mmap(file, vma);
548 if (error)
549 goto unmap_and_free_vma;
550 } else if (flags & MAP_SHARED) {
551 error = shmem_zero_setup(vma);
552 if (error)
553 goto free_vma;
554 }
555
556 /* Can addr have changed??
557 *
558 * Answer: Yes, several device drivers can do it in their
559 * f_op->mmap method. -DaveM
560 */
561 if (addr != vma->vm_start) {
562 /*
563 * It is a bit too late to pretend changing the virtual
564 * area of the mapping, we just corrupted userspace
565 * in the do_munmap, so FIXME (not in 2.4 to avoid breaking
566 * the driver API).
567 */
568 struct vm_area_struct * stale_vma;
569 /* Since addr changed, we rely on the mmap op to prevent
570 * collisions with existing vmas and just use find_vma_prepare
571 * to update the tree pointers.
572 */
573 addr = vma->vm_start;
574 stale_vma = find_vma_prepare(mm, addr, &prev,
575 &rb_link, &rb_parent);
576 /*
577 * Make sure the lowlevel driver did its job right.
578 */
579 if (unlikely(stale_vma && stale_vma->vm_start < vma->vm_end)) {
580 printk(KERN_ERR "buggy mmap operation: [<%p>]\n",
581 file ? file->f_op->mmap : NULL);
582 BUG();
583 }
584 }
585
586 vma_link(mm, vma, prev, rb_link, rb_parent);
587 if (correct_wcount)
588 atomic_inc(&file->f_dentry->d_inode->i_writecount);
589
590 out:
591 mm->total_vm += len >> PAGE_SHIFT;
592 if (vm_flags & VM_LOCKED) {
593 mm->locked_vm += len >> PAGE_SHIFT;
594 make_pages_present(addr, addr + len);
595 }
596 return addr;
597
598 unmap_and_free_vma:
599 if (correct_wcount)
600 atomic_inc(&file->f_dentry->d_inode->i_writecount);
601 vma->vm_file = NULL;
602 fput(file);
603
604 /* Undo any partial mapping done by a device driver. */
605 zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
606 free_vma:
607 kmem_cache_free(vm_area_cachep, vma);
608 return error;
609 }
610
611 /* Get an address range which is currently unmapped.
612 * For shmat() with addr=0.
613 *
614 * Ugly calling convention alert:
615 * Return value with the low bits set means error value,
616 * ie
617 * if (ret & ~PAGE_MASK)
618 * error = ret;
619 *
620 * This function "knows" that -ENOMEM has the bits set.
621 */
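/*
 * Callers therefore test the low bits, as do_mmap_pgoff() does above:
 *
 *	addr = get_unmapped_area(file, addr, len, pgoff, flags);
 *	if (addr & ~PAGE_MASK)
 *		return addr;
 */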
622 #ifndef HAVE_ARCH_UNMAPPED_AREA
623 static inline unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
624 {
625 struct vm_area_struct *vma;
626
627 if (len > TASK_SIZE)
628 return -ENOMEM;
629
630 if (addr) {
631 addr = PAGE_ALIGN(addr);
632 vma = find_vma(current->mm, addr);
633 if (TASK_SIZE - len >= addr &&
634 (!vma || addr + len <= vma->vm_start))
635 return addr;
636 }
637 addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
638
639 for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
640 /* At this point: (!vma || addr < vma->vm_end). */
641 if (TASK_SIZE - len < addr)
642 return -ENOMEM;
643 if (!vma || addr + len <= vma->vm_start)
644 return addr;
645 addr = vma->vm_end;
646 }
647 }
648 #else
649 extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
650 #endif
651
652 unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
653 {
654 if (flags & MAP_FIXED) {
655 if (addr > TASK_SIZE - len || addr >= TASK_SIZE)
656 return -ENOMEM;
657 if (addr & ~PAGE_MASK)
658 return -EINVAL;
659
660 /* Ensure a non-privileged process is not trying to map
661 * lower pages.
662 */
663 if (addr < mmap_min_addr && !capable(CAP_SYS_RAWIO))
664 return -EPERM;
665
666 return addr;
667 }
668
669 if (file && file->f_op && file->f_op->get_unmapped_area)
670 addr = file->f_op->get_unmapped_area(file, addr, len, pgoff, flags);
671 else
672 addr = arch_get_unmapped_area(file, addr, len, pgoff, flags);
673
674 if (addr < mmap_min_addr && !capable(CAP_SYS_RAWIO))
675 return -ENOMEM;
676
677 return addr;
678 }
679
680 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
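/* The caller is expected to hold mm->mmap_sem; the result of the
 * lookup is cached in mm->mmap_cache for the ~35% hit rate noted below. */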
681 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
682 {
683 struct vm_area_struct *vma = NULL;
684
685 if (mm) {
686 /* Check the cache first. */
687 /* (Cache hit rate is typically around 35%.) */
688 vma = mm->mmap_cache;
689 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
690 rb_node_t * rb_node;
691
692 rb_node = mm->mm_rb.rb_node;
693 vma = NULL;
694
695 while (rb_node) {
696 struct vm_area_struct * vma_tmp;
697
698 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
699
700 if (vma_tmp->vm_end > addr) {
701 vma = vma_tmp;
702 if (vma_tmp->vm_start <= addr)
703 break;
704 rb_node = rb_node->rb_left;
705 } else
706 rb_node = rb_node->rb_right;
707 }
708 if (vma)
709 mm->mmap_cache = vma;
710 }
711 }
712 return vma;
713 }
714
715 /* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
716 struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
717 struct vm_area_struct **pprev)
718 {
719 if (mm) {
720 /* Go through the RB tree quickly. */
721 struct vm_area_struct * vma;
722 rb_node_t * rb_node, * rb_last_right, * rb_prev;
723
724 rb_node = mm->mm_rb.rb_node;
725 rb_last_right = rb_prev = NULL;
726 vma = NULL;
727
728 while (rb_node) {
729 struct vm_area_struct * vma_tmp;
730
731 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
732
733 if (vma_tmp->vm_end > addr) {
734 vma = vma_tmp;
735 rb_prev = rb_last_right;
736 if (vma_tmp->vm_start <= addr)
737 break;
738 rb_node = rb_node->rb_left;
739 } else {
740 rb_last_right = rb_node;
741 rb_node = rb_node->rb_right;
742 }
743 }
744 if (vma) {
745 if (vma->vm_rb.rb_left) {
746 rb_prev = vma->vm_rb.rb_left;
747 while (rb_prev->rb_right)
748 rb_prev = rb_prev->rb_right;
749 }
750 *pprev = NULL;
751 if (rb_prev)
752 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
753 if ((rb_prev ? (*pprev)->vm_next : mm->mmap) != vma)
754 BUG();
755 return vma;
756 }
757 }
758 *pprev = NULL;
759 return NULL;
760 }
761
762 struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr)
763 {
764 struct vm_area_struct * vma;
765 unsigned long start;
766
767 addr &= PAGE_MASK;
768 vma = find_vma(mm,addr);
769 if (!vma)
770 return NULL;
771 if (vma->vm_start <= addr)
772 return vma;
773 if (!(vma->vm_flags & VM_GROWSDOWN))
774 return NULL;
775 start = vma->vm_start;
776 if (expand_stack(vma, addr))
777 return NULL;
778 if (vma->vm_flags & VM_LOCKED) {
779 make_pages_present(addr, start);
780 }
781 return vma;
782 }
783
784 /* Normal function to fix up a mapping
785 * This function is the default for when an area has no specific
786 * function. This may be used as part of a more specific routine.
787 * This function works out what part of an area is affected and
788 * adjusts the mapping information. Since the actual page
789 * manipulation is done in do_mmap(), none need be done here,
790 * though it would probably be more appropriate.
791 *
792 * By the time this function is called, the area struct has been
793 * removed from the process mapping list, so it needs to be
794 * reinserted if necessary.
795 *
796 * The 4 main cases are:
797 * Unmapping the whole area
798 * Unmapping from the start of the segment to a point in it
799 * Unmapping from an intermediate point to the end
800 * Unmapping between two intermediate points, making a hole.
801 *
802 * Case 4 involves the creation of 2 new areas, for each side of
803 * the hole. If possible, we reuse the existing area rather than
804 * allocate a new one, and the return indicates whether the old
805 * area was reused.
806 */
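/*
 * Example of case 4 above: unmapping [0x2000, 0x3000) from an area
 * covering [0x1000, 0x5000) truncates "area" to [0x1000, 0x2000) and
 * consumes "extra" for the new tail [0x3000, 0x5000), whose vm_pgoff
 * is advanced by (0x3000 - 0x1000) >> PAGE_SHIFT pages.
 */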
807 static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
808 struct vm_area_struct *area, unsigned long addr, size_t len,
809 struct vm_area_struct *extra)
810 {
811 struct vm_area_struct *mpnt;
812 unsigned long end = addr + len;
813
814 area->vm_mm->total_vm -= len >> PAGE_SHIFT;
815 if (area->vm_flags & VM_LOCKED)
816 area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
817
818 /* Unmapping the whole area. */
819 if (addr == area->vm_start && end == area->vm_end) {
820 if (area->vm_ops && area->vm_ops->close)
821 area->vm_ops->close(area);
822 if (area->vm_file)
823 fput(area->vm_file);
824 kmem_cache_free(vm_area_cachep, area);
825 return extra;
826 }
827
828 /* Work out to one of the ends. */
829 if (end == area->vm_end) {
830 /*
831 * here area isn't visible to the semaphore-less readers
832 * so we don't need to update it under the spinlock.
833 */
834 area->vm_end = addr;
835 lock_vma_mappings(area);
836 spin_lock(&mm->page_table_lock);
837 } else if (addr == area->vm_start) {
838 area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
839 /* same locking considerations of the above case */
840 area->vm_start = end;
841 lock_vma_mappings(area);
842 spin_lock(&mm->page_table_lock);
843 } else {
844 /* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
845 /* Add end mapping -- leave beginning for below */
846 mpnt = extra;
847 extra = NULL;
848
849 mpnt->vm_mm = area->vm_mm;
850 mpnt->vm_start = end;
851 mpnt->vm_end = area->vm_end;
852 mpnt->vm_page_prot = area->vm_page_prot;
853 mpnt->vm_flags = area->vm_flags;
854 mpnt->vm_raend = 0;
855 mpnt->vm_ops = area->vm_ops;
856 mpnt->vm_pgoff = area->vm_pgoff + ((end - area->vm_start) >> PAGE_SHIFT);
857 mpnt->vm_file = area->vm_file;
858 mpnt->vm_private_data = area->vm_private_data;
859 if (mpnt->vm_file)
860 get_file(mpnt->vm_file);
861 if (mpnt->vm_ops && mpnt->vm_ops->open)
862 mpnt->vm_ops->open(mpnt);
863 area->vm_end = addr; /* Truncate area */
864
865 /* Because mpnt->vm_file == area->vm_file this locks
866 * things correctly.
867 */
868 lock_vma_mappings(area);
869 spin_lock(&mm->page_table_lock);
870 __insert_vm_struct(mm, mpnt);
871 }
872
873 __insert_vm_struct(mm, area);
874 spin_unlock(&mm->page_table_lock);
875 unlock_vma_mappings(area);
876 return extra;
877 }
878
879 /*
880 * Try to free as many page directory entries as we can,
881 * without having to work very hard at actually scanning
882 * the page tables themselves.
883 *
884 * Right now we try to free page tables if we have a nice
885 * PGDIR-aligned area that got free'd up. We could be more
886 * granular if we want to, but this is fast and simple,
887 * and covers the bad cases.
888 *
889 * "prev", if it exists, points to a vma before the one
890 * we just free'd - but there's no telling how much before.
891 */
892 static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
893 unsigned long start, unsigned long end)
894 {
895 unsigned long first = start & PGDIR_MASK;
896 unsigned long last = end + PGDIR_SIZE - 1;
897 unsigned long start_index, end_index;
898
899 if (!prev) {
900 prev = mm->mmap;
901 if (!prev)
902 goto no_mmaps;
903 if (prev->vm_end > start) {
904 if (last > prev->vm_start)
905 last = prev->vm_start;
906 goto no_mmaps;
907 }
908 }
909 for (;;) {
910 struct vm_area_struct *next = prev->vm_next;
911
912 if (next) {
913 if (next->vm_start < start) {
914 prev = next;
915 continue;
916 }
917 if (last > next->vm_start)
918 last = next->vm_start;
919 }
920 if (prev->vm_end > first)
921 first = prev->vm_end + PGDIR_SIZE - 1;
922 break;
923 }
924 no_mmaps:
925 if (last < first)
926 return;
927 /*
928 * If the PGD bits are not consecutive in the virtual address, the
929 * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
930 */
931 start_index = pgd_index(first);
932 end_index = pgd_index(last);
933 if (end_index > start_index) {
934 clear_page_tables(mm, start_index, end_index - start_index);
935 flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
936 }
937 }
938
939 /* Munmap is split into 2 main parts -- this part which finds
940 * what needs doing, and the areas themselves, which do the
941 * work. This now handles partial unmappings.
942 * Jeremy Fitzhardine <jeremy@sw.oz.au>
943 */
944 int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
945 {
946 struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;
947
948 if ((addr & ~PAGE_MASK) || addr >= TASK_SIZE || len > TASK_SIZE-addr)
949 return -EINVAL;
950
951 if ((len = PAGE_ALIGN(len)) == 0)
952 return -EINVAL;
953
954 /* Check if this memory area is ok - put it on the temporary
955 * list if so.. The checks here are pretty simple --
956 * every area affected in some way (by any overlap) is put
957 * on the list. If nothing is put on, nothing is affected.
958 */
959 mpnt = find_vma_prev(mm, addr, &prev);
960 if (!mpnt)
961 return 0;
962 /* we have addr < mpnt->vm_end */
963
964 if (mpnt->vm_start >= addr+len)
965 return 0;
966
967 /* If we'll make a "hole", check the vm area limit */
968 if ((mpnt->vm_start < addr && mpnt->vm_end > addr+len)
969 && mm->map_count >= max_map_count)
970 return -ENOMEM;
971
972 /*
973 * We may need one additional vma to fix up the mappings ...
974 * and this is the last chance for an easy error exit.
975 */
976 extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
977 if (!extra)
978 return -ENOMEM;
979
980 npp = (prev ? &prev->vm_next : &mm->mmap);
981 free = NULL;
982 spin_lock(&mm->page_table_lock);
983 for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
984 *npp = mpnt->vm_next;
985 mpnt->vm_next = free;
986 free = mpnt;
987 rb_erase(&mpnt->vm_rb, &mm->mm_rb);
988 }
989 mm->mmap_cache = NULL; /* Kill the cache. */
990 spin_unlock(&mm->page_table_lock);
991
992 /* Ok - we have the memory areas we should free on the 'free' list,
993 * so release them, and unmap the page range..
994 * If the one of the segments is only being partially unmapped,
995 * it will put new vm_area_struct(s) into the address space.
996 * In that case we have to be careful with VM_DENYWRITE.
997 */
998 while ((mpnt = free) != NULL) {
999 unsigned long st, end, size;
1000 struct file *file = NULL;
1001
1002 free = free->vm_next;
1003
1004 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
1005 end = addr+len;
1006 end = end > mpnt->vm_end ? mpnt->vm_end : end;
1007 size = end - st;
1008
1009 if (mpnt->vm_flags & VM_DENYWRITE &&
1010 (st != mpnt->vm_start || end != mpnt->vm_end) &&
1011 (file = mpnt->vm_file) != NULL) {
1012 atomic_dec(&file->f_dentry->d_inode->i_writecount);
1013 }
1014 remove_shared_vm_struct(mpnt);
1015 mm->map_count--;
1016
1017 zap_page_range(mm, st, size);
1018
1019 /*
1020 * Fix the mapping, and free the old area if it wasn't reused.
1021 */
1022 extra = unmap_fixup(mm, mpnt, st, size, extra);
1023 if (file)
1024 atomic_inc(&file->f_dentry->d_inode->i_writecount);
1025 }
1026 validate_mm(mm);
1027
1028 /* Release the extra vma struct if it wasn't used */
1029 if (extra)
1030 kmem_cache_free(vm_area_cachep, extra);
1031
1032 free_pgtables(mm, prev, addr, addr+len);
1033
1034 return 0;
1035 }
1036
1037 asmlinkage long sys_munmap(unsigned long addr, size_t len)
1038 {
1039 int ret;
1040 struct mm_struct *mm = current->mm;
1041
1042 down_write(&mm->mmap_sem);
1043 ret = do_munmap(mm, addr, len);
1044 up_write(&mm->mmap_sem);
1045 return ret;
1046 }
1047
1048
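/*
 * Debugging aid: if mmap_sem can be taken for reading here, no writer
 * holds it, so the caller cannot be holding it for writing as it
 * should; warn and drop the read lock again.
 */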
1049 static inline void verify_mmap_write_lock_held(struct mm_struct *mm)
1050 {
1051 if (down_read_trylock(&mm->mmap_sem)) {
1052 WARN_ON(1);
1053 up_read(&mm->mmap_sem);
1054 }
1055 }
1056
1057 /*
1058 * this is really a simplified "do_mmap". it only handles
1059 * anonymous maps. eventually we may be able to do some
1060 * brk-specific accounting here.
1061 */
1062 unsigned long do_brk(unsigned long addr, unsigned long len)
1063 {
1064 struct mm_struct * mm = current->mm;
1065 struct vm_area_struct * vma, * prev;
1066 unsigned long flags;
1067 rb_node_t ** rb_link, * rb_parent;
1068
1069 len = PAGE_ALIGN(len);
1070 if (!len)
1071 return addr;
1072
1073 if ((addr + len) > TASK_SIZE || (addr + len) < addr)
1074 return -EINVAL;
1075
1076 if (addr < mmap_min_addr && !capable(CAP_SYS_RAWIO))
1077 return -ENOMEM;
1078
1079 /*
1080 * mlock MCL_FUTURE?
1081 */
1082 if (mm->def_flags & VM_LOCKED) {
1083 unsigned long locked = mm->locked_vm << PAGE_SHIFT;
1084 locked += len;
1085 if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
1086 return -EAGAIN;
1087 }
1088
1089 /*
1090 * mm->mmap_sem is required to protect against another thread
1091 * changing the mappings while we sleep (on kmalloc for one).
1092 */
1093 verify_mmap_write_lock_held(mm);
1094
1095 /*
1096 * Clear old maps. this also does some error checking for us
1097 */
1098 munmap_back:
1099 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1100 if (vma && vma->vm_start < addr + len) {
1101 if (do_munmap(mm, addr, len))
1102 return -ENOMEM;
1103 goto munmap_back;
1104 }
1105
1106 /* Check against address space limits *after* clearing old maps... */
1107 if ((mm->total_vm << PAGE_SHIFT) + len
1108 > current->rlim[RLIMIT_AS].rlim_cur)
1109 return -ENOMEM;
1110
1111 if (mm->map_count > max_map_count)
1112 return -ENOMEM;
1113
1114 if (!vm_enough_memory(len >> PAGE_SHIFT))
1115 return -ENOMEM;
1116
1117 flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags;
1118
1119 /* Can we just expand an old anonymous mapping? */
1120 if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags))
1121 goto out;
1122
1123 /*
1124 * create a vma struct for an anonymous mapping
1125 */
1126 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1127 if (!vma)
1128 return -ENOMEM;
1129
1130 vma->vm_mm = mm;
1131 vma->vm_start = addr;
1132 vma->vm_end = addr + len;
1133 vma->vm_flags = flags;
1134 vma->vm_page_prot = protection_map[flags & 0x0f];
1135 vma->vm_ops = NULL;
1136 vma->vm_pgoff = 0;
1137 vma->vm_file = NULL;
1138 vma->vm_private_data = NULL;
1139
1140 vma_link(mm, vma, prev, rb_link, rb_parent);
1141
1142 out:
1143 mm->total_vm += len >> PAGE_SHIFT;
1144 if (flags & VM_LOCKED) {
1145 mm->locked_vm += len >> PAGE_SHIFT;
1146 make_pages_present(addr, addr + len);
1147 }
1148 return addr;
1149 }
1150
1151 /* Build the RB tree corresponding to the VMA list. */
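/*
 * The vm_next list is sorted by address, so each vma is simply linked
 * in as the right child of its predecessor and rb_insert_color() is
 * left to rebalance the tree as we go.
 */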
1152 void build_mmap_rb(struct mm_struct * mm)
1153 {
1154 struct vm_area_struct * vma;
1155 rb_node_t ** rb_link, * rb_parent;
1156
1157 mm->mm_rb = RB_ROOT;
1158 rb_link = &mm->mm_rb.rb_node;
1159 rb_parent = NULL;
1160 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1161 __vma_link_rb(mm, vma, rb_link, rb_parent);
1162 rb_parent = &vma->vm_rb;
1163 rb_link = &rb_parent->rb_right;
1164 }
1165 }
1166
1167 /* Release all mmaps. */
1168 void exit_mmap(struct mm_struct * mm)
1169 {
1170 struct vm_area_struct * mpnt;
1171
1172 release_segments(mm);
1173 spin_lock(&mm->page_table_lock);
1174 mpnt = mm->mmap;
1175 mm->mmap = mm->mmap_cache = NULL;
1176 mm->mm_rb = RB_ROOT;
1177 mm->rss = 0;
1178 spin_unlock(&mm->page_table_lock);
1179 mm->total_vm = 0;
1180 mm->locked_vm = 0;
1181
1182 flush_cache_mm(mm);
1183 while (mpnt) {
1184 struct vm_area_struct * next = mpnt->vm_next;
1185 unsigned long start = mpnt->vm_start;
1186 unsigned long end = mpnt->vm_end;
1187 unsigned long size = end - start;
1188
1189 if (mpnt->vm_ops) {
1190 if (mpnt->vm_ops->close)
1191 mpnt->vm_ops->close(mpnt);
1192 }
1193 mm->map_count--;
1194 remove_shared_vm_struct(mpnt);
1195 zap_page_range(mm, start, size);
1196 if (mpnt->vm_file)
1197 fput(mpnt->vm_file);
1198 kmem_cache_free(vm_area_cachep, mpnt);
1199 mpnt = next;
1200 }
1201
1202 /* This is just debugging */
1203 if (mm->map_count)
1204 BUG();
1205
1206 clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
1207
1208 flush_tlb_mm(mm);
1209 }
1210
1211 /* Insert vm structure into process list sorted by address
1212 * and into the inode's i_mmap ring. If vm_file is non-NULL
1213 * then the i_shared_lock must be held here.
1214 */
1215 void __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
1216 {
1217 struct vm_area_struct * __vma, * prev;
1218 rb_node_t ** rb_link, * rb_parent;
1219
1220 __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
1221 if (__vma && __vma->vm_start < vma->vm_end)
1222 BUG();
1223 __vma_link(mm, vma, prev, rb_link, rb_parent);
1224 mm->map_count++;
1225 validate_mm(mm);
1226 }
1227
1228 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
1229 {
1230 struct vm_area_struct * __vma, * prev;
1231 rb_node_t ** rb_link, * rb_parent;
1232
1233 __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
1234 if (__vma && __vma->vm_start < vma->vm_end)
1235 return -ENOMEM;
1236 vma_link(mm, vma, prev, rb_link, rb_parent);
1237 validate_mm(mm);
1238 return 0;
1239 }
1240