/linux-2.4.37.9/mm/

mprotect.c
  100  if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&   in mprotect_fixup_all()
  103  prev->vm_end = vma->vm_end;   in mprotect_fixup_all()
  131  if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&   in mprotect_fixup_start()
  134  prev->vm_end = end;   in mprotect_fixup_start()
  144  n->vm_end = end;   in mprotect_fixup_start()
  184  vma->vm_end = start;   in mprotect_fixup_end()
  210  left->vm_end = start;   in mprotect_fixup_middle()
  227  vma->vm_end = end;   in mprotect_fixup_middle()
  251  if (end == vma->vm_end)   in mprotect_fixup()
  255  } else if (end == vma->vm_end)   in mprotect_fixup()
  [all …]
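
The mprotect_fixup() hits above show the usual VMA surgery: depending on whether the
[start, end) range covers the whole region, its start, its end, or its middle, vm_end is
trimmed, extended, or a new VMA is split off. Below is a minimal stand-alone sketch of
that case split; the struct and helper names are illustrative, not the kernel's
struct vm_area_struct:

    /* Simplified model of the mprotect_fixup() dispatch; not kernel code. */
    #include <stdio.h>

    struct vma {                      /* stand-in for struct vm_area_struct */
        unsigned long vm_start;
        unsigned long vm_end;
    };

    static const char *fixup_case(const struct vma *vma,
                                  unsigned long start, unsigned long end)
    {
        if (start == vma->vm_start) {
            if (end == vma->vm_end)
                return "fixup_all: change flags in place, maybe merge with prev";
            return "fixup_start: head [start,end) changes, old VMA keeps [end,vm_end)";
        }
        if (end == vma->vm_end)
            return "fixup_end: old VMA shrinks to [vm_start,start), tail gets new flags";
        return "fixup_middle: split into three, the middle part gets the new flags";
    }

    int main(void)
    {
        struct vma v = { 0x1000, 0x5000 };
        printf("%s\n", fixup_case(&v, 0x2000, 0x3000));   /* the middle case */
        return 0;
    }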

mmap.c
  266  if (vma_tmp->vm_end > addr) {   in find_vma_prepare()
  361  if (prev->vm_end == addr && can_vma_merge(prev, vm_flags)) {   in vma_merge()
  365  prev->vm_end = end;   in vma_merge()
  367  if (next && prev->vm_end == next->vm_start && can_vma_merge(next, vm_flags)) {   in vma_merge()
  368  prev->vm_end = next->vm_end;   in vma_merge()
  526  vma->vm_end = addr + len;   in do_mmap_pgoff()
  579  if (unlikely(stale_vma && stale_vma->vm_start < vma->vm_end)) {   in do_mmap_pgoff()
  605  zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);   in do_mmap_pgoff()
  645  addr = vma->vm_end;   in arch_get_unmapped_area()
  689  if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {   in find_vma()
  [all …]
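
The vma_merge() lines show the merge rule itself: when the previous region ends exactly
where the new one starts and the flags are compatible, the kernel simply extends
prev->vm_end, and if that now abuts the next region it absorbs that one too. A rough,
self-contained model of the check follows; the real can_vma_merge() also requires an
anonymous, non-shared mapping, which this sketch glosses over:

    /* User-level sketch of the vma_merge() idea; simplified, not kernel code. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct vma {
        unsigned long vm_start, vm_end;
        unsigned long vm_flags;
        struct vma *vm_next;
    };

    static bool can_merge(const struct vma *v, unsigned long flags)
    {
        return v->vm_flags == flags;      /* the kernel checks more than this */
    }

    /* Returns true if [addr, end) was folded into prev (and possibly next). */
    static bool try_merge(struct vma *prev, struct vma *next,
                          unsigned long addr, unsigned long end,
                          unsigned long flags)
    {
        if (!prev || prev->vm_end != addr || !can_merge(prev, flags))
            return false;
        prev->vm_end = end;
        if (next && prev->vm_end == next->vm_start && can_merge(next, flags)) {
            prev->vm_end = next->vm_end;  /* swallow next; the kernel frees it */
            prev->vm_next = next->vm_next;
        }
        return true;
    }

    int main(void)
    {
        struct vma next = { 0x3000, 0x4000, 0, NULL };
        struct vma prev = { 0x1000, 0x2000, 0, &next };

        if (try_merge(&prev, &next, 0x2000, 0x3000, 0))
            printf("prev now spans [%lx,%lx)\n", prev.vm_start, prev.vm_end);
        return 0;
    }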

mremap.c
  140  if (prev && prev->vm_end == new_addr &&   in move_vma()
  143  prev->vm_end = new_addr + new_len;   in move_vma()
  148  if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {   in move_vma()
  150  prev->vm_end = next->vm_end;   in move_vma()
  166  if (prev && prev->vm_end == new_addr &&   in move_vma()
  169  prev->vm_end = new_addr + new_len;   in move_vma()
  189  new_vma->vm_end = new_addr+new_len;   in move_vma()
  308  if (old_len > vma->vm_end - addr)   in do_mremap()
  334  if (old_len == vma->vm_end - addr &&   in do_mremap()
  344  vma->vm_end = addr + new_len;   in do_mremap()
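
do_mremap() grows a mapping in place by bumping vm_end when the old range runs to the end
of the VMA and there is room behind it; otherwise move_vma() relocates the mapping (and
tries the same prev/next merging seen above). From user space the two outcomes look like
this minimal sketch, which assumes nothing beyond the standard mremap(2) interface:

    /* Grow an anonymous mapping; the kernel extends vm_end or moves the VMA. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t old_len = 4096, new_len = 8 * 4096;
        void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
        if (q == MAP_FAILED) { perror("mremap"); return 1; }

        printf("mapping %s (old %p, new %p)\n",
               q == p ? "grew in place" : "was moved", p, q);
        munmap(q, new_len);
        return 0;
    }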

mlock.c
  32   n->vm_end = end;   in mlock_fixup_start()
  68   vma->vm_end = start;   in mlock_fixup_end()
  90   left->vm_end = start;   in mlock_fixup_middle()
  108  vma->vm_end = end;   in mlock_fixup_middle()
  126  if (end == vma->vm_end)   in mlock_fixup()
  131  if (end == vma->vm_end)   in mlock_fixup()
  175  if (vma->vm_end >= end) {   in do_mlock()
  180  tmp = vma->vm_end;   in do_mlock()
  259  error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);   in do_mlockall()
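
mlock_fixup() mirrors the mprotect case split: locking only part of a region forces the
VMA to be cut at the range boundaries so that just the locked piece carries VM_LOCKED,
while do_mlock() walks region by region using vm_end as the stepping point. A small
user-space illustration (it may need privilege or a generous RLIMIT_MEMLOCK to succeed):

    /* Lock one page in the middle of a 4-page mapping: the "fixup_middle" case. */
    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        /* The 4-page VMA is split into three; only the middle page is locked. */
        if (mlock(p + page, page) != 0)
            perror("mlock");
        else
            munlock(p + page, page);

        munmap(p, 4 * page);
        return 0;
    }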

filemap.c
  2025  if (end > ((vma->vm_end >> PAGE_SHIFT) + vma->vm_pgoff))   in nopage_sequential_readahead()
  2026  end = (vma->vm_end >> PAGE_SHIFT) + vma->vm_pgoff;   in nopage_sequential_readahead()
  2072  endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;   in filemap_nopage()
  2419  if (end <= vma->vm_end) {   in sys_msync()
  2429  error = msync_interval(vma, start, vma->vm_end, flags);   in sys_msync()
  2432  start = vma->vm_end;   in sys_msync()
  2467  n->vm_end = end;   in madvise_fixup_start()
  2504  vma->vm_end = start;   in madvise_fixup_end()
  2527  left->vm_end = start;   in madvise_fixup_middle()
  2544  vma->vm_end = end;   in madvise_fixup_middle()
  [all …]
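
The sys_msync() hits show the standard pattern for applying an operation to a byte range
that may span several VMAs: handle [start, min(end, vm_end)) in the current region, then
continue from vm_end in the next one. A bare-bones sketch of that loop, with error and
hole checking left out and simplified types:

    #include <stdio.h>

    struct vma { unsigned long vm_start, vm_end; struct vma *vm_next; };

    static void sync_interval(struct vma *v, unsigned long s, unsigned long e)
    {
        printf("msync [%lx,%lx) within VMA [%lx,%lx)\n", s, e, v->vm_start, v->vm_end);
    }

    static void walk_and_sync(struct vma *vma, unsigned long start, unsigned long end)
    {
        while (vma && start < end) {
            if (end <= vma->vm_end) {        /* range ends inside this VMA */
                sync_interval(vma, start, end);
                return;
            }
            sync_interval(vma, start, vma->vm_end);
            start = vma->vm_end;             /* resume at the next region */
            vma = vma->vm_next;
        }
    }

    int main(void)
    {
        struct vma b = { 0x3000, 0x6000, NULL };
        struct vma a = { 0x1000, 0x3000, &b };
        walk_and_sync(&a, 0x2000, 0x5000);
        return 0;
    }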

/linux-2.4.37.9/drivers/char/drm-4.0/

vm.c
  92   if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */   in drm_vm_shm_nopage()
  124  if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */   in drm_vm_shm_nopage_lock()
  159  if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */   in drm_vm_dma_nopage()
  184  vma->vm_start, vma->vm_end - vma->vm_start);   in drm_vm_open()
  214  vma->vm_start, vma->vm_end - vma->vm_start);   in drm_vm_close()
  242  unsigned long length = vma->vm_end - vma->vm_start;   in drm_mmap_dma()
  248  vma->vm_start, vma->vm_end, VM_OFFSET(vma));   in drm_mmap_dma()
  279  vma->vm_start, vma->vm_end, VM_OFFSET(vma));   in drm_mmap()
  300  if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;   in drm_mmap()
  333  vma->vm_end - vma->vm_start,   in drm_mmap()
  [all …]
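
The DRM code uses vm_end for two defensive checks that reappear in the newer drm_vm.h
below: a nopage handler refuses faults whose address lies past vm_end (the mapping was
grown, e.g. by mremap(), beyond the backing object), and drm_mmap() rejects a mapping
whose length vm_end - vm_start does not match the object size. A self-contained sketch of
the shape of those checks, with plain integers standing in for NOPAGE_SIGBUS and -EINVAL:

    #include <stdio.h>

    struct vma { unsigned long vm_start, vm_end; };

    static int check_fault(const struct vma *vma, unsigned long address)
    {
        if (address > vma->vm_end)
            return -1;              /* disallow access past the mapping */
        return 0;
    }

    static int check_mmap_size(const struct vma *vma, unsigned long map_size)
    {
        if (map_size != vma->vm_end - vma->vm_start)
            return -1;              /* mapping must cover the object exactly */
        return 0;
    }

    int main(void)
    {
        struct vma v = { 0x1000, 0x3000 };
        printf("fault: %d, size: %d\n",
               check_fault(&v, 0x4000), check_mmap_size(&v, 0x2000));
        return 0;
    }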

/linux-2.4.37.9/drivers/char/drm/

drm_vm.h
  134  if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */   in DRM()
  163  vma->vm_start, vma->vm_end - vma->vm_start);   in DRM()
  236  if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */   in DRM()
  264  if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */   in DRM()
  284  vma->vm_start, vma->vm_end - vma->vm_start);   in DRM()
  305  vma->vm_start, vma->vm_end - vma->vm_start);   in DRM()
  328  unsigned long length = vma->vm_end - vma->vm_start;   in DRM()
  334  vma->vm_start, vma->vm_end, VM_OFFSET(vma));   in DRM()
  374  vma->vm_start, vma->vm_end, VM_OFFSET(vma));   in DRM()
  401  if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;   in DRM()
  [all …]

/linux-2.4.37.9/arch/ia64/mm/

hugetlbpage.c
  145  if (prev->vm_end > start) {   in hugetlb_free_pgtables()
  162  if (prev->vm_end > first)   in hugetlb_free_pgtables()
  163  first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;   in hugetlb_free_pgtables()
  188  unsigned long end = vma->vm_end;   in copy_hugetlb_page_range()
  235  (start < vma->vm_end))   in follow_hugetlb_page()
  237  } while (len && start < vma->vm_end);   in follow_hugetlb_page()
  302  BUG_ON(vma->vm_end & ~HPAGE_MASK);   in hugetlb_prefault()
  305  for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {   in hugetlb_prefault()
  362  addr = COLOR_HALIGN(vmm->vm_end);   in hugetlb_get_unmapped_area()

fault.c
  39   vma->vm_end += PAGE_SIZE;   in expand_backing_store()
  156  if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {   in ia64_do_page_fault()

/linux-2.4.37.9/arch/ia64/ia32/

binfmt_elf32.c
  89   vma->vm_end = vma->vm_start + max(PAGE_SIZE, 2*IA32_PAGE_SIZE);   in ia64_elf32_init()
  115  vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);   in ia64_elf32_init()
  192  mpnt->vm_end = IA32_STACK_TOP;   in ia32_setup_arg_pages()
  204  current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;   in ia32_setup_arg_pages()

/linux-2.4.37.9/arch/mips/kernel/

sysmips.c
  39   address = vma->vm_end - address;   in get_max_hostname()
  42   if (vma->vm_next && vma->vm_next->vm_start == vma->vm_end &&   in get_max_hostname()

/linux-2.4.37.9/arch/s390x/kernel/

exec32.c
  61   mpnt->vm_end = STACK_TOP;   in setup_arg_pages32()
  73   current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;   in setup_arg_pages32()

/linux-2.4.37.9/arch/parisc/kernel/

sys_parisc.c
  50   addr = vma->vm_end;   in get_unshared_area()
  75   addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;   in get_shared_area()
  76   if (addr < vma->vm_end) /* handle wraparound */   in get_shared_area()

/linux-2.4.37.9/fs/proc/

array.c
  191  unsigned long len = (vma->vm_end - vma->vm_start) >> 10;   in task_mem()
  337  vsize += vma->vm_end - vma->vm_start;   in proc_pid_stat()
  507  statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total);   in proc_pid_statm()
  516  else if (vma->vm_end > 0x60000000)   in proc_pid_statm()
  546  map->vm_end,   in show_map()
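
The /proc accounting above is plain arithmetic on the VMA bounds: vm_end - vm_start is the
region size in bytes, >> 10 turns it into kilobytes and >> PAGE_SHIFT into pages. A quick
self-contained check of that math, assuming 4 KB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KB pages */

    int main(void)
    {
        unsigned long vm_start = 0x08048000, vm_end = 0x080a6000;
        unsigned long bytes = vm_end - vm_start;

        printf("region: %lu bytes = %lu KB = %lu pages\n",
               bytes, bytes >> 10, bytes >> PAGE_SHIFT);
        return 0;
    }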

/linux-2.4.37.9/arch/ia64/sn/io/drivers/

pciba.c
  850  DPRINTF("vma->vm_end is %lx\n", vma->vm_end);   in dma_mmap()
  891  DPRINTF("vma->vm_end is %lx\n", vma->vm_end);   in mmap_pci_address()
  916  vma->vm_end-vma->vm_start,   in mmap_pci_address()
  929  DPRINTF("vma->vm_end is %lx\n", vma->vm_end);   in mmap_kernel_address()
  943  vma->vm_end-vma->vm_start,   in mmap_kernel_address()

/linux-2.4.37.9/drivers/char/ftape/lowlevel/

ftape-ctl.c
  713  if ((vma->vm_end - vma->vm_start) % FT_BUFF_SIZE != 0) {   in ftape_mmap()
  716  vma->vm_end - vma->vm_start,   in ftape_mmap()
  719  num_buffers = (vma->vm_end - vma->vm_start) / FT_BUFF_SIZE;   in ftape_mmap()
  723  vma->vm_end - vma->vm_start,   in ftape_mmap()
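
ftape_mmap() only accepts mappings whose length is an exact multiple of its DMA buffer
size and derives the buffer count from vm_end - vm_start. A stand-alone version of that
check; the FT_BUFF_SIZE value below is only illustrative:

    #include <stdio.h>

    #define FT_BUFF_SIZE (32 * 1024)   /* assumed buffer size for this sketch */

    static int buffers_for_mapping(unsigned long vm_start, unsigned long vm_end)
    {
        unsigned long len = vm_end - vm_start;

        if (len % FT_BUFF_SIZE != 0)
            return -1;                  /* the driver rejects the mmap() here */
        return (int)(len / FT_BUFF_SIZE);
    }

    int main(void)
    {
        printf("%d\n", buffers_for_mapping(0x40000000UL, 0x40000000UL + 3 * FT_BUFF_SIZE));
        printf("%d\n", buffers_for_mapping(0x40000000UL, 0x40000000UL + 5000));
        return 0;
    }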

/linux-2.4.37.9/arch/sh/kernel/

sys_sh.c
  83   addr = vma->vm_end;   in arch_get_unmapped_area()
  241  if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)   in sys_cacheflush()

/linux-2.4.37.9/drivers/sbus/char/

flash.c
  70   if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size)   in flash_mmap()
  71   size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT));   in flash_mmap()

/linux-2.4.37.9/drivers/char/

mem.c
  207  if (remap_page_range(vma->vm_start, offset, vma->vm_end-vma->vm_start,   in mmap_mem()
  400  count = vma->vm_end - addr;   in read_zero_pagealigned()
  476  if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))   in mmap_zero()
  584  unsigned long size = vma->vm_end - vma->vm_start;   in mmap_kmem()
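
mmap_mem() and mmap_zero() both size their work as vm_end - vm_start, i.e. exactly the
length user space passed to mmap(). The user-space half of the mmap_zero() path looks
like this short example, which maps /dev/zero privately and touches the pages:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 2 * 4096;                 /* becomes vm_end - vm_start */
        int fd = open("/dev/zero", O_RDWR);
        if (fd < 0) { perror("open"); return 1; }

        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        close(fd);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        memset(p, 0xab, len);                  /* demand-zeroed pages, now dirtied */
        printf("mapped %zu bytes at %p\n", len, (void *)p);
        munmap(p, len);
        return 0;
    }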

fetchop.c
  287  pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;   in fetchop_mmap()
  305  while (vm_start < vma->vm_end) {   in fetchop_mmap()

/linux-2.4.37.9/arch/sh64/mm/

cache.c
  282  if (!vma || (aligned_start <= vma->vm_end)) {   in sh64_icache_inv_user_page_range()
  287  vma_end = vma->vm_end;   in sh64_icache_inv_user_page_range()
  296  aligned_start = vma->vm_end; /* Skip to start of next region */   in sh64_icache_inv_user_page_range()

/linux-2.4.37.9/arch/ppc/mm/

tlb.c
  84   local_flush_tlb_range(mm, mp->vm_start, mp->vm_end);   in local_flush_tlb_mm()

/linux-2.4.37.9/fs/ncpfs/

mmap.c
  111  if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff   in ncp_mmap()
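
ncp_mmap() computes the highest file page the mapping would touch, i.e. the mapping's
length in pages plus the starting file page vm_pgoff, and refuses mappings whose end lies
beyond the filesystem's limit. A hedged sketch of that bound check; the limit value used
here is purely illustrative:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KB pages */

    static int mapping_fits(unsigned long vm_start, unsigned long vm_end,
                            unsigned long vm_pgoff, unsigned long max_pages)
    {
        unsigned long end_page = ((vm_end - vm_start) >> PAGE_SHIFT) + vm_pgoff;

        return end_page <= max_pages;   /* 0 means the driver returns an error */
    }

    int main(void)
    {
        /* An 8 MB mapping starting at file page 1024, against a 4096-page limit. */
        printf("%d\n", mapping_fits(0x40000000UL, 0x40800000UL, 1024, 4096));
        return 0;
    }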

/linux-2.4.37.9/arch/x86_64/ia32/

ia32_binfmt.c
  245  mpnt->vm_end = IA32_STACK_TOP;   in ia32_setup_arg_pages()
  258  current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;   in ia32_setup_arg_pages()

/linux-2.4.37.9/Documentation/vm/

locking
  24   Any code that modifies the vmlist, or the vm_start/vm_end/
  57   Code that changes vm_start/vm_end/vm_flags:VM_LOCKED of vma's on
  64   It is advisable that changes to vm_start/vm_end be protected, although
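
The locking notes describe, roughly, a reader/writer discipline: code that changes
vm_start/vm_end (or the vmlist) takes the mm's mmap semaphore exclusively, while lookups
may hold it shared (the full rules also involve page_table_lock). As a user-space analogy
only, with a pthread rwlock standing in for mm->mmap_sem and a plain struct for the VMA,
not kernel code:

    #include <pthread.h>
    #include <stdio.h>

    struct vma { unsigned long vm_start, vm_end; };

    static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
    static struct vma region = { 0xbfff0000UL, 0xc0000000UL };

    static void grow_region(unsigned long new_end)     /* "writer" side */
    {
        pthread_rwlock_wrlock(&mmap_sem);
        region.vm_end = new_end;                       /* safe: exclusive access */
        pthread_rwlock_unlock(&mmap_sem);
    }

    static unsigned long region_size(void)             /* "reader" side */
    {
        pthread_rwlock_rdlock(&mmap_sem);
        unsigned long len = region.vm_end - region.vm_start;
        pthread_rwlock_unlock(&mmap_sem);
        return len;
    }

    int main(void)
    {
        grow_region(0xc0010000UL);
        printf("region is now %lu bytes\n", region_size());
        return 0;
    }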