/linux-2.6.39/drivers/gpu/drm/
drm_vm.c:
    42  static void drm_vm_open(struct vm_area_struct *vma);
    43  static void drm_vm_close(struct vm_area_struct *vma);
    45  static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)  in drm_io_prot() argument
    47      pgprot_t tmp = vm_get_page_prot(vma->vm_flags);  in drm_io_prot()
    59      if (efi_range_is_wc(vma->vm_start, vma->vm_end -  in drm_io_prot()
    60                          vma->vm_start))  in drm_io_prot()
    70  static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)  in drm_dma_prot() argument
    72      pgprot_t tmp = vm_get_page_prot(vma->vm_flags);  in drm_dma_prot()
    91  static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)  in drm_do_vm_fault() argument
    93      struct drm_file *priv = vma->vm_file->private_data;  in drm_do_vm_fault()
        [all …]

/linux-2.6.39/mm/
mmap.c:
    50  struct vm_area_struct *vma, struct vm_area_struct *prev,
   195  static void __remove_shared_vm_struct(struct vm_area_struct *vma,  in __remove_shared_vm_struct() argument
   198      if (vma->vm_flags & VM_DENYWRITE)  in __remove_shared_vm_struct()
   200      if (vma->vm_flags & VM_SHARED)  in __remove_shared_vm_struct()
   204      if (unlikely(vma->vm_flags & VM_NONLINEAR))  in __remove_shared_vm_struct()
   205          list_del_init(&vma->shared.vm_set.list);  in __remove_shared_vm_struct()
   207          vma_prio_tree_remove(vma, &mapping->i_mmap);  in __remove_shared_vm_struct()
   215  void unlink_file_vma(struct vm_area_struct *vma)  in unlink_file_vma() argument
   217      struct file *file = vma->vm_file;  in unlink_file_vma()
   222          __remove_shared_vm_struct(vma, file, mapping);  in unlink_file_vma()
        [all …]

madvise.c:
    39  static long madvise_behavior(struct vm_area_struct * vma,  in madvise_behavior() argument
    43      struct mm_struct * mm = vma->vm_mm;  in madvise_behavior()
    46      unsigned long new_flags = vma->vm_flags;  in madvise_behavior()
    62      if (vma->vm_flags & VM_IO) {  in madvise_behavior()
    70          error = ksm_madvise(vma, start, end, behavior, &new_flags);  in madvise_behavior()
    76          error = hugepage_madvise(vma, &new_flags, behavior);  in madvise_behavior()
    82      if (new_flags == vma->vm_flags) {  in madvise_behavior()
    83          *prev = vma;  in madvise_behavior()
    87      pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);  in madvise_behavior()
    88      *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,  in madvise_behavior()
        [all …]

prio_tree.c:
    28  #define RADIX_INDEX(vma) ((vma)->vm_pgoff)  argument
    29  #define VMA_SIZE(vma) (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT)  argument
    31  #define HEAP_INDEX(vma) ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1))  argument
    75  void vma_prio_tree_add(struct vm_area_struct *vma, struct vm_area_struct *old)  in vma_prio_tree_add() argument
    78      BUG_ON(RADIX_INDEX(vma) != RADIX_INDEX(old));  in vma_prio_tree_add()
    79      BUG_ON(HEAP_INDEX(vma) != HEAP_INDEX(old));  in vma_prio_tree_add()
    81      vma->shared.vm_set.head = NULL;  in vma_prio_tree_add()
    82      vma->shared.vm_set.parent = NULL;  in vma_prio_tree_add()
    85          list_add(&vma->shared.vm_set.list,  in vma_prio_tree_add()
    88          list_add_tail(&vma->shared.vm_set.list,  in vma_prio_tree_add()
        [all …]

nommu.c:
   114      struct vm_area_struct *vma;  in kobjsize() local
   116      vma = find_vma(current->mm, (unsigned long)objp);  in kobjsize()
   117      if (vma)  in kobjsize()
   118          return vma->vm_end - vma->vm_start;  in kobjsize()
   133      struct vm_area_struct *vma;  in __get_user_pages() local
   146          vma = find_vma(mm, start);  in __get_user_pages()
   147          if (!vma)  in __get_user_pages()
   151          if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||  in __get_user_pages()
   152              !(vm_flags & vma->vm_flags))  in __get_user_pages()
   161              vmas[i] = vma;  in __get_user_pages()
        [all …]

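The nommu.c hits above (kobjsize(), __get_user_pages()) lean on find_vma(), whose contract is: return the first VMA whose vm_end lies strictly above the given address, or NULL. The result may still start above the address, so callers that need a containing mapping must also check vm_start. A minimal user-space model of that contract follows; toy_vma and toy_find_vma are hypothetical simplified types, not the kernel's implementation (the real 2.6.39 find_vma() walks mm->mm_rb and caches mm->mmap_cache):

    #include <stdio.h>

    /* Toy stand-in for struct vm_area_struct: a sorted singly linked list. */
    struct toy_vma {
        unsigned long vm_start, vm_end;   /* covers [vm_start, vm_end) */
        struct toy_vma *vm_next;
    };

    /* Same contract as the kernel's find_vma(): first VMA with vm_end > addr.
     * The returned VMA may lie entirely above addr. */
    static struct toy_vma *toy_find_vma(struct toy_vma *mmap, unsigned long addr)
    {
        struct toy_vma *vma;

        for (vma = mmap; vma; vma = vma->vm_next)
            if (addr < vma->vm_end)
                return vma;
        return NULL;
    }

    int main(void)
    {
        struct toy_vma b = { 0x8000, 0x9000, NULL };
        struct toy_vma a = { 0x4000, 0x5000, &b };
        struct toy_vma *vma = toy_find_vma(&a, 0x4800);

        /* Mirrors kobjsize(): size of the mapping containing the address. */
        if (vma && vma->vm_start <= 0x4800)
            printf("size = %lu\n", vma->vm_end - vma->vm_start);
        return 0;
    }

The asymmetry is deliberate: a non-NULL return does not by itself prove the address is mapped, only that some mapping ends above it.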
mremap.c:
    51  static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,  in alloc_new_pmd() argument
    68      if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))  in alloc_new_pmd()
    74  static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,  in move_ptes() argument
    80      struct mm_struct *mm = vma->vm_mm;  in move_ptes()
    86      mmu_notifier_invalidate_range_start(vma->vm_mm,  in move_ptes()
    88      if (vma->vm_file) {  in move_ptes()
    95          mapping = vma->vm_file->f_mapping;  in move_ptes()
   115          pte = ptep_clear_flush(vma, old_addr, old_pte);  in move_ptes()
   127      mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);  in move_ptes()
   132  unsigned long move_page_tables(struct vm_area_struct *vma,  in move_page_tables() argument
        [all …]

mprotect.c:
    81  static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,  in change_pmd_range() argument
    93              split_huge_page_pmd(vma->vm_mm, pmd);  in change_pmd_range()
    94          else if (change_huge_pmd(vma, pmd, addr, newprot))  in change_pmd_range()
   100      change_pte_range(vma->vm_mm, pmd, addr, next, newprot,  in change_pmd_range()
   105  static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,  in change_pud_range() argument
   117      change_pmd_range(vma, pud, addr, next, newprot,  in change_pud_range()
   122  static void change_protection(struct vm_area_struct *vma,  in change_protection() argument
   126      struct mm_struct *mm = vma->vm_mm;  in change_protection()
   133      flush_cache_range(vma, addr, end);  in change_protection()
   138      change_pud_range(vma, pgd, addr, next, newprot,  in change_protection()
        [all …]

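The change_protection() chain above is the kernel's standard multi-level page-table walk: each level computes the next entry boundary, clamps it to end, and descends one level. A self-contained toy of just the address arithmetic follows; it uses two fake levels and plain integers instead of real page tables, and the change_-prefixed names only mimic the kernel's (a sketch, not the kernel code):

    #include <stdio.h>

    #define SHIFT2 8                  /* toy "pmd" entry spans 256 addresses */
    #define SHIFT1 4                  /* toy "pte" entry spans 16 addresses  */

    /* Like pgd_addr_end()/pmd_addr_end(): next aligned boundary, clamped. */
    static unsigned long next_boundary(unsigned long addr, unsigned long end,
                                       unsigned long size)
    {
        unsigned long next = (addr + size) & ~(size - 1);
        return next < end ? next : end;
    }

    static void change_pte_range(unsigned long addr, unsigned long end)
    {
        printf("  pte range [%#lx, %#lx)\n", addr, end);
    }

    /* Mirrors change_pmd_range(): clamp to this entry's span, descend. */
    static void change_pmd_range(unsigned long addr, unsigned long end)
    {
        unsigned long next;

        do {
            next = next_boundary(addr, end, 1UL << SHIFT1);
            change_pte_range(addr, next);
        } while (addr = next, addr != end);
    }

    static void change_protection(unsigned long addr, unsigned long end)
    {
        unsigned long next;

        do {
            next = next_boundary(addr, end, 1UL << SHIFT2);
            change_pmd_range(addr, next);
        } while (addr = next, addr != end);
    }

    int main(void)
    {
        change_protection(0x0c, 0x1f4);   /* crosses several boundaries */
        return 0;
    }

Each level clamps next to the smaller of its entry boundary and end, which is exactly the job the kernel's pgd_addr_end()/pud_addr_end()/pmd_addr_end() helpers perform.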
mlock.c:
   150  static long __mlock_vma_pages_range(struct vm_area_struct *vma,  in __mlock_vma_pages_range() argument
   154      struct mm_struct *mm = vma->vm_mm;  in __mlock_vma_pages_range()
   161      VM_BUG_ON(start < vma->vm_start);  in __mlock_vma_pages_range()
   162      VM_BUG_ON(end > vma->vm_end);  in __mlock_vma_pages_range()
   171      if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)  in __mlock_vma_pages_range()
   178      if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))  in __mlock_vma_pages_range()
   210  long mlock_vma_pages_range(struct vm_area_struct *vma,  in mlock_vma_pages_range() argument
   214      BUG_ON(!(vma->vm_flags & VM_LOCKED));  in mlock_vma_pages_range()
   219      if (vma->vm_flags & (VM_IO | VM_PFNMAP))  in mlock_vma_pages_range()
   222      if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||  in mlock_vma_pages_range()
        [all …]

rmap.c:
   129  int anon_vma_prepare(struct vm_area_struct *vma)  in anon_vma_prepare() argument
   131      struct anon_vma *anon_vma = vma->anon_vma;  in anon_vma_prepare()
   136          struct mm_struct *mm = vma->vm_mm;  in anon_vma_prepare()
   143          anon_vma = find_mergeable_anon_vma(vma);  in anon_vma_prepare()
   155          if (likely(!vma->anon_vma)) {  in anon_vma_prepare()
   156              vma->anon_vma = anon_vma;  in anon_vma_prepare()
   158              avc->vma = vma;  in anon_vma_prepare()
   159              list_add(&avc->same_vma, &vma->anon_vma_chain);  in anon_vma_prepare()
   180  static void anon_vma_chain_link(struct vm_area_struct *vma,  in anon_vma_chain_link() argument
   184      avc->vma = vma;  in anon_vma_chain_link()
        [all …]

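anon_vma_prepare() above has the classic check-allocate-recheck shape: test vma->anon_vma locklessly, allocate (which may sleep), retake the lock, and install only if the field is still NULL, discarding the allocation when another thread won the race. A pthread sketch of the same shape follows; lazy_prepare and struct ctx are hypothetical names, and the kernel version additionally tries find_mergeable_anon_vma() before allocating:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    struct ctx { void *anon; };

    /* Check-alloc-recheck: allocate outside the lock, install under it,
     * and throw the allocation away if another thread beat us to it. */
    static int lazy_prepare(struct ctx *c)
    {
        void *fresh;

        if (c->anon)                /* fast path: usually already set */
            return 0;

        fresh = malloc(64);         /* may sleep/fail, so done unlocked */
        if (!fresh)
            return -1;

        pthread_mutex_lock(&lock);
        if (!c->anon) {             /* recheck under the lock */
            c->anon = fresh;
            fresh = NULL;
        }
        pthread_mutex_unlock(&lock);

        free(fresh);                /* no-op if we won the race */
        return 0;
    }

The point of the pattern is that the expensive, sleepable work happens outside the lock, while the lock only guards the cheap install-if-still-empty step.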
memory.c:
   362  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,  in free_pgtables() argument
   365      while (vma) {  in free_pgtables()
   366          struct vm_area_struct *next = vma->vm_next;  in free_pgtables()
   367          unsigned long addr = vma->vm_start;  in free_pgtables()
   373          unlink_anon_vmas(vma);  in free_pgtables()
   374          unlink_file_vma(vma);  in free_pgtables()
   376          if (is_vm_hugetlb_page(vma)) {  in free_pgtables()
   377              hugetlb_free_pgd_range(tlb, addr, vma->vm_end,  in free_pgtables()
   383              while (next && next->vm_start <= vma->vm_end + PMD_SIZE  in free_pgtables()
   385                  vma = next;  in free_pgtables()
        [all …]

fremap.c:
    26  static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,  in zap_pte() argument
    34          flush_cache_page(vma, addr, pte_pfn(pte));  in zap_pte()
    35          pte = ptep_clear_flush(vma, addr, ptep);  in zap_pte()
    36          page = vm_normal_page(vma, addr, pte);  in zap_pte()
    56  static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,  in install_file_pte() argument
    68          zap_pte(mm, vma, addr, pte);  in install_file_pte()
    84  static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,  in populate_range() argument
    90          err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);  in populate_range()
   128      struct vm_area_struct *vma;  in SYSCALL_DEFINE5() local
   157      vma = find_vma(mm, start);  in SYSCALL_DEFINE5()
        [all …]

pgtable-generic.c:
    23  int ptep_set_access_flags(struct vm_area_struct *vma,  in ptep_set_access_flags() argument
    29          set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
    30          flush_tlb_page(vma, address);  in ptep_set_access_flags()
    37  int pmdp_set_access_flags(struct vm_area_struct *vma,  in pmdp_set_access_flags() argument
    45          set_pmd_at(vma->vm_mm, address, pmdp, entry);  in pmdp_set_access_flags()
    46          flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);  in pmdp_set_access_flags()
    57  int ptep_clear_flush_young(struct vm_area_struct *vma,  in ptep_clear_flush_young() argument
    61      young = ptep_test_and_clear_young(vma, address, ptep);  in ptep_clear_flush_young()
    63          flush_tlb_page(vma, address);  in ptep_clear_flush_young()
    69  int pmdp_clear_flush_young(struct vm_area_struct *vma,  in pmdp_clear_flush_young() argument
        [all …]

mincore.c:
    22  static void mincore_hugetlb_page_range(struct vm_area_struct *vma,  in mincore_hugetlb_page_range() argument
    29      h = hstate_vma(vma);  in mincore_hugetlb_page_range()
    86  static void mincore_unmapped_range(struct vm_area_struct *vma,  in mincore_unmapped_range() argument
    93      if (vma->vm_file) {  in mincore_unmapped_range()
    96          pgoff = linear_page_index(vma, addr);  in mincore_unmapped_range()
    98              vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);  in mincore_unmapped_range()
   105  static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,  in mincore_pte_range() argument
   113      ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in mincore_pte_range()
   120          mincore_unmapped_range(vma, addr, next, vec);  in mincore_pte_range()
   125              *vec = mincore_page(vma->vm_file->f_mapping, pgoff);  in mincore_pte_range()
        [all …]

/linux-2.6.39/drivers/media/video/
videobuf2-memops.c:
    37  struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)  in vb2_get_vma() argument
    45      if (vma->vm_ops && vma->vm_ops->open)  in vb2_get_vma()
    46          vma->vm_ops->open(vma);  in vb2_get_vma()
    48      if (vma->vm_file)  in vb2_get_vma()
    49          get_file(vma->vm_file);  in vb2_get_vma()
    51      memcpy(vma_copy, vma, sizeof(*vma));  in vb2_get_vma()
    67  void vb2_put_vma(struct vm_area_struct *vma)  in vb2_put_vma() argument
    69      if (!vma)  in vb2_put_vma()
    72      if (vma->vm_file)  in vb2_put_vma()
    73          fput(vma->vm_file);  in vb2_put_vma()
        [all …]

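vb2_get_vma()/vb2_put_vma() above form a get/put pair: acquiring invokes the VMA's open() callback and takes a file reference before copying the descriptor, and releasing undoes both. A generic user-space sketch of the same pairing follows; struct res, res_get, and res_put are hypothetical stand-ins, not a real API:

    #include <stdlib.h>
    #include <string.h>

    struct res {
        int *refcnt;          /* shared counter, like get_file()/fput() */
        char payload[32];
    };

    /* Acquire: bump the shared refcount, then duplicate the descriptor,
     * mirroring vb2_get_vma()'s open() + get_file() + memcpy(). */
    static struct res *res_get(struct res *orig)
    {
        struct res *copy = malloc(sizeof(*copy));

        if (!copy)
            return NULL;
        (*orig->refcnt)++;
        memcpy(copy, orig, sizeof(*copy));
        return copy;
    }

    /* Release: drop the reference and free the copy, like vb2_put_vma(). */
    static void res_put(struct res *copy)
    {
        if (!copy)
            return;
        (*copy->refcnt)--;
        free(copy);
    }

    int main(void)
    {
        int refs = 1;
        struct res orig = { &refs, "buf" };
        struct res *copy = res_get(&orig);

        res_put(copy);        /* refs back to 1, private copy freed */
        return refs == 1 ? 0 : 1;
    }

The invariant is that every successful get is matched by exactly one put, the same discipline the kernel enforces around get_file()/fput().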
/linux-2.6.39/drivers/infiniband/hw/ehca/
ehca_uverbs.c:
    71  static void ehca_mm_open(struct vm_area_struct *vma)  in ehca_mm_open() argument
    73      u32 *count = (u32 *)vma->vm_private_data;  in ehca_mm_open()
    76              vma->vm_start, vma->vm_end);  in ehca_mm_open()
    82              vma->vm_start, vma->vm_end);  in ehca_mm_open()
    84              vma->vm_start, vma->vm_end, *count);  in ehca_mm_open()
    87  static void ehca_mm_close(struct vm_area_struct *vma)  in ehca_mm_close() argument
    89      u32 *count = (u32 *)vma->vm_private_data;  in ehca_mm_close()
    92              vma->vm_start, vma->vm_end);  in ehca_mm_close()
    97              vma->vm_start, vma->vm_end, *count);  in ehca_mm_close()
   105  static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,  in ehca_mmap_fw() argument
        [all …]

/linux-2.6.39/fs/proc/
task_mmu.c:
    91  static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)  in vma_stop() argument
    93      if (vma && vma != priv->tail_vma) {  in vma_stop()
    94          struct mm_struct *mm = vma->vm_mm;  in vma_stop()
   105      struct vm_area_struct *vma, *tail_vma = NULL;  in m_start() local
   135      vma = find_vma(mm, last_addr);  in m_start()
   136      if (last_addr && vma) {  in m_start()
   137          vma = vma->vm_next;  in m_start()
   145          vma = NULL;  in m_start()
   147          vma = mm->mmap;  in m_start()
   148          while (l-- && vma)  in m_start()
        [all …]

/linux-2.6.39/arch/x86/kernel/
sys_x86_64.c:
    65      struct vm_area_struct *vma;  in arch_get_unmapped_area() local
    79          vma = find_vma(mm, addr);  in arch_get_unmapped_area()
    81              (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
    95      for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {  in arch_get_unmapped_area()
   109          if (!vma || addr + len <= vma->vm_start) {  in arch_get_unmapped_area()
   116          if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area()
   117              mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area()
   119          addr = vma->vm_end;  in arch_get_unmapped_area()
   129      struct vm_area_struct *vma;  in arch_get_unmapped_area_topdown() local
   147      vma = find_vma(mm, addr);  in arch_get_unmapped_area_topdown()
        [all …]

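Both arch_get_unmapped_area() variants above scan for a gap: start from a hint, and for each VMA in address order either report the hole before it or jump past its vm_end (the kernel also remembers too-small holes in mm->cached_hole_size). A first-fit sketch of the core loop follows, with the same toy types as earlier and the cached_hole_size bookkeeping omitted (a sketch, not the x86 code):

    #include <stdio.h>

    struct toy_vma {
        unsigned long vm_start, vm_end;
        struct toy_vma *vm_next;      /* sorted by vm_start */
    };

    /* First fit: lowest address >= hint where [addr, addr + len) does
     * not intersect any existing mapping. */
    static unsigned long toy_get_unmapped_area(struct toy_vma *mmap,
                                               unsigned long hint,
                                               unsigned long len)
    {
        unsigned long addr = hint;
        struct toy_vma *vma;

        for (vma = mmap; vma; vma = vma->vm_next) {
            if (addr + len <= vma->vm_start)  /* hole before this VMA */
                return addr;
            if (addr < vma->vm_end)           /* overlap: jump past it */
                addr = vma->vm_end;
        }
        return addr;                           /* hole after the last VMA */
    }

    int main(void)
    {
        struct toy_vma b = { 0x8000, 0x9000, NULL };
        struct toy_vma a = { 0x4000, 0x7000, &b };

        /* First 0x1000-byte hole at or above 0x4000 is at 0x7000. */
        printf("%#lx\n", toy_get_unmapped_area(&a, 0x4000, 0x1000));
        return 0;
    }

The topdown variant inverts the direction of the scan but keeps the same overlap test against vm_start.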
/linux-2.6.39/arch/sh/mm/
mmap.c:
    49      struct vm_area_struct *vma;  in arch_get_unmapped_area() local
    76          vma = find_vma(mm, addr);  in arch_get_unmapped_area()
    78              (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
    95      for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {  in arch_get_unmapped_area()
   109          if (likely(!vma || addr + len <= vma->vm_start)) {  in arch_get_unmapped_area()
   116          if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area()
   117              mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area()
   119          addr = vma->vm_end;  in arch_get_unmapped_area()
   130      struct vm_area_struct *vma;  in arch_get_unmapped_area_topdown() local
   159      vma = find_vma(mm, addr);  in arch_get_unmapped_area_topdown()
        [all …]

/linux-2.6.39/drivers/xen/xenfs/
privcmd.c:
    36  static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
   150      struct vm_area_struct *vma;  member
   158      struct vm_area_struct *vma = st->vma;  in mmap_mfn_range() local
   168          ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))  in mmap_mfn_range()
   171      rc = xen_remap_domain_mfn_range(vma,  in mmap_mfn_range()
   174                                      vma->vm_page_prot,  in mmap_mfn_range()
   188      struct vm_area_struct *vma;  in privcmd_ioctl_mmap() local
   213      vma = find_vma(mm, msg->va);  in privcmd_ioctl_mmap()
   216      if (!vma || (msg->va != vma->vm_start) ||  in privcmd_ioctl_mmap()
   217          !privcmd_enforce_singleshot_mapping(vma))  in privcmd_ioctl_mmap()
        [all …]

/linux-2.6.39/arch/frv/mm/
elf-fdpic.c:
    62      struct vm_area_struct *vma;  in arch_get_unmapped_area() local
    75          vma = find_vma(current->mm, addr);  in arch_get_unmapped_area()
    77              (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
    88      vma = find_vma(current->mm, PAGE_SIZE);  in arch_get_unmapped_area()
    89      for (; vma; vma = vma->vm_next) {  in arch_get_unmapped_area()
    92          if (addr + len <= vma->vm_start)  in arch_get_unmapped_area()
    94          addr = vma->vm_end;  in arch_get_unmapped_area()
   103      vma = find_vma(current->mm, addr);  in arch_get_unmapped_area()
   104      for (; vma; vma = vma->vm_next) {  in arch_get_unmapped_area()
   107          if (addr + len <= vma->vm_start)  in arch_get_unmapped_area()
        [all …]

/linux-2.6.39/arch/m32r/include/asm/
cacheflush.h:
    13  #define flush_cache_range(vma, start, end) do { } while (0)  argument
    14  #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)  argument
    21  #define flush_icache_page(vma,pg) _flush_cache_copyback_all()  argument
    22  #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_copyback_all()  argument
    27  #define flush_icache_page(vma,pg) smp_flush_cache_all()  argument
    28  #define flush_icache_user_range(vma,pg,adr,len) smp_flush_cache_all()  argument
    35  #define flush_cache_range(vma, start, end) do { } while (0)  argument
    36  #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)  argument
    42  #define flush_icache_page(vma,pg) _flush_cache_all()  argument
    43  #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_all()  argument
        [all …]

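Most of the m32r hooks above expand to do { } while (0), the conventional shape for a no-op statement macro. A short illustration of why the wrapper matters (toy usage, not the kernel header):

    #define flush_cache_range(vma, start, end) do { } while (0)

    int main(void)
    {
        int dirty = 1;

        /* do { } while (0) plus the caller's semicolon forms exactly one
         * statement; expanding to bare braces instead would leave a stray
         * ';' here and detach the else from the if. */
        if (dirty)
            flush_cache_range(0, 0x1000, 0x2000);
        else
            dirty = 0;
        return dirty;
    }

The same wrapper also lets multi-statement macros behave like a single function call, which is why the kernel uses it uniformly even for empty stubs.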
/linux-2.6.39/arch/x86/mm/
hugetlbpage.c:
    20      struct vm_area_struct *vma,  in page_table_shareable() argument
    29      unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;  in page_table_shareable()
    44  static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)  in vma_shareable() argument
    52      if (vma->vm_flags & VM_MAYSHARE &&  in vma_shareable()
    53          vma->vm_start <= base && end <= vma->vm_end)  in vma_shareable()
    63      struct vm_area_struct *vma = find_vma(mm, addr);  in huge_pmd_share() local
    64      struct address_space *mapping = vma->vm_file->f_mapping;  in huge_pmd_share()
    65      pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +  in huge_pmd_share()
    66                    vma->vm_pgoff;  in huge_pmd_share()
    72      if (!vma_shareable(vma, addr))  in huge_pmd_share()
        [all …]

/linux-2.6.39/drivers/gpu/drm/nouveau/
nouveau_vm.c:
    31  nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)  in nouveau_vm_map_at() argument
    33      struct nouveau_vm *vm = vma->vm;  in nouveau_vm_map_at()
    35      int big = vma->node->type != vm->spg_shift;  in nouveau_vm_map_at()
    36      u32 offset = vma->node->offset + (delta >> 12);  in nouveau_vm_map_at()
    37      u32 bits = vma->node->type - 12;  in nouveau_vm_map_at()
    56          vm->map(vma, pgt, node, pte, len, phys, delta);  in nouveau_vm_map_at()
    65          delta += (u64)len << vma->node->type;  in nouveau_vm_map_at()
    73  nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)  in nouveau_vm_map() argument
    75      nouveau_vm_map_at(vma, 0, node);  in nouveau_vm_map()
    79  nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,  in nouveau_vm_map_sg() argument
        [all …]

/linux-2.6.39/arch/powerpc/include/asm/
tlbflush.h:
    37  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
    42  extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
    49  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
    54  #define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)  argument
    57  #define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)  argument
    65  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
    66  extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
    67  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
    70  static inline void local_flush_tlb_page(struct vm_area_struct *vma,  in local_flush_tlb_page() argument
    73      flush_tlb_page(vma, vmaddr);  in local_flush_tlb_page()
        [all …]

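The powerpc header above shows the usual UP/SMP split: the cross-CPU flush_tlb_page() either exists as a real out-of-line function (SMP) or collapses into the purely local flush via a macro (UP). A compile-time sketch of that selection follows; toy_vma, flush, and local_flush are illustrative names, not the kernel API:

    #include <stdio.h>

    struct toy_vma { unsigned long vm_start, vm_end; };

    static void local_flush(struct toy_vma *vma, unsigned long addr)
    {
        (void)vma;
        printf("flushing %#lx on this CPU only\n", addr);
    }

    #ifdef CONFIG_SMP
    /* SMP build: a real function that would broadcast the flush to other
     * CPUs; its definition lives elsewhere, so this branch links only in
     * an SMP configuration. */
    void flush(struct toy_vma *vma, unsigned long addr);
    #else
    /* UP build: the cross-CPU flush degenerates to the local one. */
    #define flush(vma, addr) local_flush(vma, addr)
    #endif

    int main(void)
    {
        struct toy_vma v = { 0x4000, 0x5000 };

        flush(&v, 0x4321);    /* resolved at compile time, not run time */
        return 0;
    }

Selecting the implementation with the preprocessor keeps the UP build free of any function-call or IPI overhead, which is why both powerpc and the sparc header below use the same trick.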
/linux-2.6.39/arch/sparc/include/asm/
cacheflush_32.h:
    23  #define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)  argument
    24  #define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)  argument
    34  extern void smp_flush_cache_range(struct vm_area_struct *vma,
    37  extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
    52  #define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)  argument
    53  #define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)  argument
    55  #define flush_icache_page(vma, pg) do { } while (0)  argument
    57  #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)  argument
    59  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \  argument
    61      flush_cache_page(vma, vaddr, page_to_pfn(page));\
        [all …]