Lines Matching refs:vma

99 struct vm_area_struct *vma; in kobjsize() local
101 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
102 if (vma) in kobjsize()
103 return vma->vm_end - vma->vm_start; in kobjsize()
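
The kobjsize() lines above look up the VMA covering a pointer and report vm_end - vm_start as the object's size. A minimal userspace sketch of that pattern, using an invented toy_vma array in place of the kernel's maple-tree-backed find_vma() (the struct, the linear search, and the sample ranges are illustrative only; the real find_vma() returns the first VMA ending above the address, while this toy only matches a region that actually covers it, which is the case kobjsize() relies on):

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-in for struct vm_area_struct: just an address range. */
    struct toy_vma {
        unsigned long vm_start;
        unsigned long vm_end;   /* exclusive */
    };

    /* Toy find_vma(): the region covering addr, if any. */
    static const struct toy_vma *toy_find_vma(const struct toy_vma *v, size_t n,
                                              unsigned long addr)
    {
        for (size_t i = 0; i < n; i++)
            if (addr >= v[i].vm_start && addr < v[i].vm_end)
                return &v[i];
        return NULL;
    }

    /* Mirrors the kobjsize() lines above: size of the covering region, or 0. */
    static unsigned long toy_kobjsize(const struct toy_vma *v, size_t n,
                                      unsigned long objp)
    {
        const struct toy_vma *vma = toy_find_vma(v, n, objp);

        return vma ? vma->vm_end - vma->vm_start : 0;
    }

    int main(void)
    {
        const struct toy_vma map[] = {
            { 0x10000, 0x14000 },
            { 0x20000, 0x21000 },
        };

        printf("%lu\n", toy_kobjsize(map, 2, 0x10008));  /* 16384 */
        printf("%lu\n", toy_kobjsize(map, 2, 0x30000));  /* 0: nothing covers it */
        return 0;
    }
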
154 struct vm_area_struct *vma; in __vmalloc_user_flags() local
157 vma = find_vma(current->mm, (unsigned long)ret); in __vmalloc_user_flags()
158 if (vma) in __vmalloc_user_flags()
159 vm_flags_set(vma, VM_USERMAP); in __vmalloc_user_flags()
335 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
342 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pages() argument
349 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, in vm_map_pages() argument
356 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, in vm_map_pages_zero() argument
536 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm) in setup_vma_to_mm() argument
538 vma->vm_mm = mm; in setup_vma_to_mm()
541 if (vma->vm_file) { in setup_vma_to_mm()
542 struct address_space *mapping = vma->vm_file->f_mapping; in setup_vma_to_mm()
546 vma_interval_tree_insert(vma, &mapping->i_mmap); in setup_vma_to_mm()
552 static void cleanup_vma_from_mm(struct vm_area_struct *vma) in cleanup_vma_from_mm() argument
554 vma->vm_mm->map_count--; in cleanup_vma_from_mm()
556 if (vma->vm_file) { in cleanup_vma_from_mm()
558 mapping = vma->vm_file->f_mapping; in cleanup_vma_from_mm()
562 vma_interval_tree_remove(vma, &mapping->i_mmap); in cleanup_vma_from_mm()
571 static int delete_vma_from_mm(struct vm_area_struct *vma) in delete_vma_from_mm() argument
573 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start); in delete_vma_from_mm()
575 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); in delete_vma_from_mm()
581 cleanup_vma_from_mm(vma); in delete_vma_from_mm()
590 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) in delete_vma() argument
592 vma_close(vma); in delete_vma()
593 if (vma->vm_file) in delete_vma()
594 fput(vma->vm_file); in delete_vma()
595 put_nommu_region(vma->vm_region); in delete_vma()
596 vm_area_free(vma); in delete_vma()
629 struct vm_area_struct *vma; in lock_mm_and_find_vma() local
632 vma = vma_lookup(mm, addr); in lock_mm_and_find_vma()
633 if (!vma) in lock_mm_and_find_vma()
635 return vma; in lock_mm_and_find_vma()
642 int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr) in expand_stack_locked() argument
661 struct vm_area_struct *vma; in find_vma_exact() local
665 vma = vma_iter_load(&vmi); in find_vma_exact()
666 if (!vma) in find_vma_exact()
668 if (vma->vm_start != addr) in find_vma_exact()
670 if (vma->vm_end != end) in find_vma_exact()
673 return vma; in find_vma_exact()
883 static int do_mmap_shared_file(struct vm_area_struct *vma) in do_mmap_shared_file() argument
887 ret = mmap_file(vma->vm_file, vma); in do_mmap_shared_file()
889 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_shared_file()
904 static int do_mmap_private(struct vm_area_struct *vma, in do_mmap_private() argument
920 ret = mmap_file(vma->vm_file, vma); in do_mmap_private()
922 if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags))) in do_mmap_private()
925 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_private()
955 vm_flags_set(vma, VM_MAPPED_COPY); in do_mmap_private()
956 region->vm_flags = vma->vm_flags; in do_mmap_private()
961 vma->vm_start = region->vm_start; in do_mmap_private()
962 vma->vm_end = region->vm_start + len; in do_mmap_private()
964 if (vma->vm_file) { in do_mmap_private()
968 fpos = vma->vm_pgoff; in do_mmap_private()
971 ret = kernel_read(vma->vm_file, base, len, &fpos); in do_mmap_private()
980 vma_set_anonymous(vma); in do_mmap_private()
987 region->vm_start = vma->vm_start = 0; in do_mmap_private()
988 region->vm_end = vma->vm_end = 0; in do_mmap_private()
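
The do_mmap_private() lines above show the nommu strategy for private file mappings: the kernel allocates an anonymous buffer, marks the VMA VM_MAPPED_COPY, and fills it with kernel_read() at mmap time, so the mapping is a snapshot that costs real RAM. From userspace the call sequence is the ordinary one; a hedged demo of a private, read-only file mapping (the scratch file name and sizes are arbitrary, and on an MMU kernel the same program runs but the copy is not made eagerly):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/mman.h>

    int main(void)
    {
        char path[] = "/tmp/nommu-demo-XXXXXX";  /* arbitrary scratch file */
        const char msg[] = "hello from the file\n";
        int fd = mkstemp(path);

        if (fd < 0 || write(fd, msg, sizeof(msg)) != sizeof(msg)) {
            perror("setup");
            return 1;
        }

        /* MAP_PRIVATE of a file: on nommu this takes the do_mmap_private()
         * path above, which copies the file contents into an anonymous
         * buffer up front. */
        char *p = mmap(NULL, sizeof(msg), PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        fputs(p, stdout);   /* the mapped (on nommu: copied) data */

        munmap(p, sizeof(msg));
        close(fd);
        unlink(path);
        return 0;
    }
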
1012 struct vm_area_struct *vma; in do_mmap() local
1042 vma = vm_area_alloc(current->mm); in do_mmap()
1043 if (!vma) in do_mmap()
1050 vm_flags_init(vma, vm_flags); in do_mmap()
1051 vma->vm_pgoff = pgoff; in do_mmap()
1055 vma->vm_file = get_file(file); in do_mmap()
1107 vma->vm_region = pregion; in do_mmap()
1110 vma->vm_start = start; in do_mmap()
1111 vma->vm_end = start + len; in do_mmap()
1114 vm_flags_set(vma, VM_MAPPED_COPY); in do_mmap()
1116 ret = do_mmap_shared_file(vma); in do_mmap()
1118 vma->vm_region = NULL; in do_mmap()
1119 vma->vm_start = 0; in do_mmap()
1120 vma->vm_end = 0; in do_mmap()
1154 vma->vm_start = region->vm_start = addr; in do_mmap()
1155 vma->vm_end = region->vm_end = addr + len; in do_mmap()
1160 vma->vm_region = region; in do_mmap()
1165 if (file && vma->vm_flags & VM_SHARED) in do_mmap()
1166 ret = do_mmap_shared_file(vma); in do_mmap()
1168 ret = do_mmap_private(vma, region, len, capabilities); in do_mmap()
1174 if (!vma->vm_file && in do_mmap()
1181 result = vma->vm_start; in do_mmap()
1186 BUG_ON(!vma->vm_region); in do_mmap()
1187 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); in do_mmap()
1188 if (vma_iter_prealloc(&vmi, vma)) in do_mmap()
1191 setup_vma_to_mm(vma, current->mm); in do_mmap()
1194 vma_iter_store(&vmi, vma); in do_mmap()
1198 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap()
1214 if (vma->vm_file) in do_mmap()
1215 fput(vma->vm_file); in do_mmap()
1216 vm_area_free(vma); in do_mmap()
1296 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, in split_vma() argument
1306 if (vma->vm_file) in split_vma()
1309 mm = vma->vm_mm; in split_vma()
1317 new = vm_area_dup(vma); in split_vma()
1322 *region = *vma->vm_region; in split_vma()
1325 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1335 if (vma_iter_prealloc(vmi, vma)) { in split_vma()
1345 delete_nommu_region(vma->vm_region); in split_vma()
1347 vma->vm_region->vm_start = vma->vm_start = addr; in split_vma()
1348 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()
1350 vma->vm_region->vm_end = vma->vm_end = addr; in split_vma()
1351 vma->vm_region->vm_top = addr; in split_vma()
1353 add_nommu_region(vma->vm_region); in split_vma()
1357 setup_vma_to_mm(vma, mm); in split_vma()
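
The split_vma() lines above split one region at addr: the page count in front of the split is npages = (addr - vm_start) >> PAGE_SHIFT, and whichever half becomes the tail has its vm_pgoff advanced by that amount so file offsets stay in step with addresses. A quick worked example of the arithmetic (PAGE_SHIFT of 12, i.e. 4 KiB pages, and the sample addresses are made up):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assume 4 KiB pages */

    int main(void)
    {
        unsigned long vm_start = 0x100000, vm_end = 0x108000;  /* 8 pages */
        unsigned long vm_pgoff = 4;      /* mapping starts 4 pages into the file */
        unsigned long addr = 0x103000;   /* split point */

        unsigned long npages = (addr - vm_start) >> PAGE_SHIFT;  /* 3 */

        /* Front half keeps the original start and pgoff. */
        printf("front: [%#lx, %#lx) pgoff=%lu\n", vm_start, addr, vm_pgoff);
        /* Tail half starts at the split and skips npages of the file. */
        printf("tail : [%#lx, %#lx) pgoff=%lu\n", addr, vm_end, vm_pgoff + npages);
        return 0;
    }
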
1375 struct vm_area_struct *vma, in vmi_shrink_vma() argument
1382 if (from > vma->vm_start) { in vmi_shrink_vma()
1383 if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL)) in vmi_shrink_vma()
1385 vma->vm_end = from; in vmi_shrink_vma()
1387 if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL)) in vmi_shrink_vma()
1389 vma->vm_start = to; in vmi_shrink_vma()
1393 region = vma->vm_region; in vmi_shrink_vma()
1419 struct vm_area_struct *vma; in do_munmap() local
1430 vma = vma_find(&vmi, end); in do_munmap()
1431 if (!vma) { in do_munmap()
1443 if (vma->vm_file) { in do_munmap()
1445 if (start > vma->vm_start) in do_munmap()
1447 if (end == vma->vm_end) in do_munmap()
1449 vma = vma_find(&vmi, end); in do_munmap()
1450 } while (vma); in do_munmap()
1454 if (start == vma->vm_start && end == vma->vm_end) in do_munmap()
1456 if (start < vma->vm_start || end > vma->vm_end) in do_munmap()
1460 if (end != vma->vm_end && offset_in_page(end)) in do_munmap()
1462 if (start != vma->vm_start && end != vma->vm_end) { in do_munmap()
1463 ret = split_vma(&vmi, vma, start, 1); in do_munmap()
1467 return vmi_shrink_vma(&vmi, vma, start, end); in do_munmap()
1471 if (delete_vma_from_mm(vma)) in do_munmap()
1474 delete_vma(mm, vma); in do_munmap()
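
The do_munmap() checks above encode the nommu rule that an unmap request has to line up with an existing mapping: a request matching the whole VMA deletes it, one touching exactly one end shrinks it via vmi_shrink_vma(), one strictly inside the VMA first splits it, and anything else is rejected. A small stand-alone decision model of those checks for the non-file-backed path (the function and enum names are invented; the page size, and the start-alignment check that mirrors the end-alignment one shown above, are assumptions):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* assumed */

    enum unmap_action { UNMAP_WHOLE, UNMAP_SHRINK, UNMAP_SPLIT_THEN_SHRINK, UNMAP_REJECT };

    static enum unmap_action classify(unsigned long vm_start, unsigned long vm_end,
                                      unsigned long start, unsigned long end)
    {
        if (start == vm_start && end == vm_end)
            return UNMAP_WHOLE;             /* delete the VMA outright */
        if (start < vm_start || end > vm_end)
            return UNMAP_REJECT;            /* range leaks outside the VMA */
        if (start != vm_start && start % PAGE_SIZE)
            return UNMAP_REJECT;            /* interior boundaries must be page aligned */
        if (end != vm_end && end % PAGE_SIZE)
            return UNMAP_REJECT;
        if (start != vm_start && end != vm_end)
            return UNMAP_SPLIT_THEN_SHRINK; /* punch a hole: split, then shrink */
        return UNMAP_SHRINK;                /* trim one end in place */
    }

    int main(void)
    {
        /* VMA [0x100000, 0x108000): a few sample requests. */
        printf("%d\n", classify(0x100000, 0x108000, 0x100000, 0x108000)); /* 0: whole */
        printf("%d\n", classify(0x100000, 0x108000, 0x100000, 0x102000)); /* 1: trim the front */
        printf("%d\n", classify(0x100000, 0x108000, 0x102000, 0x104000)); /* 2: split+shrink */
        printf("%d\n", classify(0x100000, 0x108000, 0x0f0000, 0x104000)); /* 3: reject */
        return 0;
    }

File-backed mappings take the separate loop shown at lines 1443-1450 above and are not modelled here.
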
1501 struct vm_area_struct *vma; in exit_mmap() local
1513 for_each_vma(vmi, vma) { in exit_mmap()
1514 cleanup_vma_from_mm(vma); in exit_mmap()
1515 delete_vma(mm, vma); in exit_mmap()
1536 struct vm_area_struct *vma; in do_mremap() local
1550 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1551 if (!vma) in do_mremap()
1554 if (vma->vm_end != vma->vm_start + old_len) in do_mremap()
1557 if (is_nommu_shared_mapping(vma->vm_flags)) in do_mremap()
1560 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) in do_mremap()
1564 vma->vm_end = vma->vm_start + new_len; in do_mremap()
1565 return vma->vm_start; in do_mremap()
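
The do_mremap() lines above give nommu mremap() its character: the lookup must match the old mapping exactly (find_vma_exact()), shared mappings cannot be resized, and the new length has to fit inside the backing region, so a mapping can shrink in place but never move. A hedged userspace demo of the always-safe case, shrinking an anonymous mapping in place (page size queried at run time; it behaves the same on MMU kernels):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/mman.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        /* Two pages of anonymous, private memory. */
        void *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* Shrink to one page without allowing a move (flags == 0): on nommu
         * this is the path above, new_len fits the existing region, so it
         * succeeds and the address stays the same. */
        void *q = mremap(p, 2 * page, page, 0);
        if (q == MAP_FAILED) {
            perror("mremap");
            return 1;
        }

        printf("unchanged address: %s\n", p == q ? "yes" : "no");
        munmap(q, page);
        return 0;
    }
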
1580 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
1586 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); in remap_pfn_range()
1591 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
1594 unsigned long vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1596 pfn += vma->vm_pgoff; in vm_iomap_memory()
1597 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
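
vm_iomap_memory() above is the driver-facing wrapper: it takes the page frame number of the physical start, adds the vma's vm_pgoff so a caller that mmap()ed with a non-zero offset lands in the right part of the device, and hands the whole VMA length to io_remap_pfn_range(). The arithmetic in miniature (PAGE_SHIFT of 12 and the device base address are assumptions for the example):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assume 4 KiB pages */

    int main(void)
    {
        unsigned long phys_start = 0x80000000UL;   /* made-up device base */
        unsigned long vm_start = 0x40010000UL, vm_end = 0x40014000UL;
        unsigned long vm_pgoff = 2;                /* user mapped at offset 2 pages */

        unsigned long vm_len = vm_end - vm_start;  /* whole VMA is mapped */
        unsigned long pfn = (phys_start >> PAGE_SHIFT) + vm_pgoff;

        printf("map %lu bytes starting at pfn %#lx (phys %#lx)\n",
               vm_len, pfn, pfn << PAGE_SHIFT);
        return 0;
    }
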
1601 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, in remap_vmalloc_range() argument
1604 unsigned int size = vma->vm_end - vma->vm_start; in remap_vmalloc_range()
1606 if (!(vma->vm_flags & VM_USERMAP)) in remap_vmalloc_range()
1609 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); in remap_vmalloc_range()
1610 vma->vm_end = vma->vm_start + size; in remap_vmalloc_range()
1634 struct vm_area_struct *vma; in __access_remote_vm() local
1641 vma = find_vma(mm, addr); in __access_remote_vm()
1642 if (vma) { in __access_remote_vm()
1644 if (addr + len >= vma->vm_end) in __access_remote_vm()
1645 len = vma->vm_end - addr; in __access_remote_vm()
1648 if (write && vma->vm_flags & VM_MAYWRITE) in __access_remote_vm()
1649 copy_to_user_page(vma, NULL, addr, in __access_remote_vm()
1651 else if (!write && vma->vm_flags & VM_MAYREAD) in __access_remote_vm()
1652 copy_from_user_page(vma, NULL, addr, in __access_remote_vm()
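
__access_remote_vm() above is the nommu back end for reading and writing another task's memory (ptrace and /proc/<pid>/mem): it finds the target VMA, clamps the length to the VMA's end, and copies directly, gated on VM_MAYWRITE or VM_MAYREAD. A small self-read demo through /proc/self/mem, which is serviced by this path in the kernel (reading one's own process is assumed to be permitted by the ptrace access check, which is the normal case):

    #define _FILE_OFFSET_BITS 64   /* let off_t hold a full pointer on 32-bit builds */
    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        char src[] = "read back through /proc/self/mem";
        char dst[sizeof(src)] = { 0 };

        int fd = open("/proc/self/mem", O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* Seek to the buffer's own address and read it back: the kernel
         * satisfies this via access_remote_vm()/__access_remote_vm(). */
        if (pread(fd, dst, sizeof(dst), (off_t)(uintptr_t)src) != sizeof(dst)) {
            perror("pread");
            close(fd);
            return 1;
        }

        printf("%s\n", dst);
        close(fd);
        return 0;
    }
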
1718 struct vm_area_struct *vma; in nommu_shrink_inode_mappings() local
1730 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { in nommu_shrink_inode_mappings()
1733 if (vma->vm_flags & VM_SHARED) { in nommu_shrink_inode_mappings()
1746 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { in nommu_shrink_inode_mappings()
1747 if (!(vma->vm_flags & VM_SHARED)) in nommu_shrink_inode_mappings()
1750 region = vma->vm_region; in nommu_shrink_inode_mappings()