Lines Matching refs:vma
55 .vma = vma_, \
63 struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev; in is_mergeable_vma() local
65 if (!mpol_equal(vmg->policy, vma_policy(vma))) in is_mergeable_vma()
75 if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY) in is_mergeable_vma()
77 if (vma->vm_file != vmg->file) in is_mergeable_vma()
79 if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx)) in is_mergeable_vma()
81 if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name)) in is_mergeable_vma()
87 struct anon_vma *anon_vma2, struct vm_area_struct *vma) in is_mergeable_anon_vma() argument
93 if ((!anon_vma1 || !anon_vma2) && (!vma || in is_mergeable_anon_vma()
94 list_is_singular(&vma->anon_vma_chain))) in is_mergeable_anon_vma()
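
The is_mergeable_vma() / is_mergeable_anon_vma() cluster above is the compatibility gate for every VMA merge: memory policy, backing file, flags (with VM_SOFTDIRTY masked out), userfaultfd context and anon_vma name must all match. The following is a minimal userspace sketch of the same predicate shape; the struct, its fields and the VM_SOFTDIRTY value are simplified stand-ins, not the kernel's definitions.

/*
 * Simplified model of the mergeability test referenced above
 * (is_mergeable_vma()); types and constants are stand-ins only.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_SOFTDIRTY 0x08000000UL	/* illustrative value, not from headers */

struct fake_vma {
	unsigned long vm_flags;
	void *vm_file;		/* stands in for struct file * */
	void *policy;		/* stands in for the NUMA policy */
	void *anon_name;	/* stands in for the anon VMA name */
};

/* Two mappings may merge only if everything but "volatile" flags matches. */
static bool is_mergeable(const struct fake_vma *a, const struct fake_vma *b)
{
	if (a->policy != b->policy)
		return false;
	if ((a->vm_flags ^ b->vm_flags) & ~VM_SOFTDIRTY)
		return false;
	if (a->vm_file != b->vm_file)
		return false;
	if (a->anon_name != b->anon_name)
		return false;
	return true;
}

int main(void)
{
	struct fake_vma x = { .vm_flags = 0x73 };
	struct fake_vma y = x;

	y.vm_flags |= VM_SOFTDIRTY;	/* deliberately ignored by the check */
	printf("mergeable: %d\n", is_mergeable(&x, &y));	/* prints 1 */
	return 0;
}
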
115 struct vm_area_struct *vma, in init_multi_vma_prep() argument
121 vp->vma = vma; in init_multi_vma_prep()
122 vp->anon_vma = vma->anon_vma; in init_multi_vma_prep()
129 vp->file = vma->vm_file; in init_multi_vma_prep()
131 vp->mapping = vma->vm_file->f_mapping; in init_multi_vma_prep()
180 static void __vma_link_file(struct vm_area_struct *vma, in __vma_link_file() argument
183 if (vma_is_shared_maywrite(vma)) in __vma_link_file()
187 vma_interval_tree_insert(vma, &mapping->i_mmap); in __vma_link_file()
194 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
197 if (vma_is_shared_maywrite(vma)) in __remove_shared_vm_struct()
201 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
220 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_pre_update_vma() argument
224 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_pre_update_vma()
229 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_post_update_vma() argument
233 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_post_update_vma()
244 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); in vma_prepare()
265 anon_vma_interval_tree_pre_update_vma(vp->vma); in vma_prepare()
272 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap); in vma_prepare()
295 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap); in vma_complete()
314 anon_vma_interval_tree_post_update_vma(vp->vma); in vma_complete()
322 uprobe_mmap(vp->vma); in vma_complete()
337 anon_vma_merge(vp->vma, vp->remove); in vma_complete()
341 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end); in vma_complete()
363 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma) in init_vma_prep() argument
365 init_multi_vma_prep(vp, vma, NULL, NULL, NULL); in init_vma_prep()
409 void remove_vma(struct vm_area_struct *vma, bool unreachable) in remove_vma() argument
412 vma_close(vma); in remove_vma()
413 if (vma->vm_file) in remove_vma()
414 fput(vma->vm_file); in remove_vma()
415 mpol_put(vma_policy(vma)); in remove_vma()
417 __vm_area_free(vma); in remove_vma()
419 vm_area_free(vma); in remove_vma()
427 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, in unmap_region() argument
430 struct mm_struct *mm = vma->vm_mm; in unmap_region()
435 unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end, in unmap_region()
437 mas_set(mas, vma->vm_end); in unmap_region()
438 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, in unmap_region()
450 __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, in __split_vma() argument
457 WARN_ON(vma->vm_start >= addr); in __split_vma()
458 WARN_ON(vma->vm_end <= addr); in __split_vma()
460 if (vma->vm_ops && vma->vm_ops->may_split) { in __split_vma()
461 err = vma->vm_ops->may_split(vma, addr); in __split_vma()
466 new = vm_area_dup(vma); in __split_vma()
474 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); in __split_vma()
482 err = vma_dup_policy(vma, new); in __split_vma()
486 err = anon_vma_clone(new, vma); in __split_vma()
496 vma_start_write(vma); in __split_vma()
499 init_vma_prep(&vp, vma); in __split_vma()
502 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); in __split_vma()
505 vma->vm_start = addr; in __split_vma()
506 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; in __split_vma()
508 vma->vm_end = addr; in __split_vma()
512 vma_complete(&vp, vmi, vma->vm_mm); in __split_vma()
513 validate_mm(vma->vm_mm); in __split_vma()
536 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, in split_vma() argument
539 if (vma->vm_mm->map_count >= sysctl_max_map_count) in split_vma()
542 return __split_vma(vmi, vma, addr, new_below); in split_vma()
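
The __split_vma()/split_vma() references above revolve around one invariant: whichever half of the split keeps the upper addresses must advance its vm_pgoff by the number of pages skipped (lines 474 and 506 in the listing), so every address still resolves to the same file offset after the split. A small userspace check of that arithmetic, using simplified stand-in names:

/*
 * Split a range [start, end) backed at page offset pgoff at address addr
 * and verify file offsets are preserved. Not kernel code; a model only.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct range {
	unsigned long start, end, pgoff;
};

/* File offset backing a given address inside the range. */
static unsigned long file_off(const struct range *r, unsigned long addr)
{
	return (r->pgoff << PAGE_SHIFT) + (addr - r->start);
}

int main(void)
{
	struct range vma  = { 0x10000, 0x20000, 100 };	/* 16 pages at pgoff 100 */
	unsigned long addr = 0x14000;			/* split point */

	struct range low  = { vma.start, addr, vma.pgoff };
	struct range high = { addr, vma.end,
			      vma.pgoff + ((addr - vma.start) >> PAGE_SHIFT) };

	/* Addresses in either half map to the same file offset as before. */
	assert(file_off(&low, 0x12000) == file_off(&vma, 0x12000));
	assert(file_off(&high, 0x18000) == file_off(&vma, 0x18000));
	printf("split ok: high.pgoff=%lu\n", high.pgoff);	/* 104 */
	return 0;
}
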
581 struct vm_area_struct *vma; in validate_mm() local
585 for_each_vma(vmi, vma) { in validate_mm()
587 struct anon_vma *anon_vma = vma->anon_vma; in validate_mm()
595 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) in validate_mm()
598 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) in validate_mm()
604 dump_vma(vma); in validate_mm()
605 pr_emerg("tree range: %px start %lx end %lx\n", vma, in validate_mm()
613 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in validate_mm()
642 init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2); in commit_merge()
655 if (vma_iter_prealloc(vmg->vmi, vmg->vma)) in commit_merge()
659 vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start); in commit_merge()
660 vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff); in commit_merge()
663 vma_iter_store(vmg->vmi, vmg->vma); in commit_merge()
674 vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm); in commit_merge()
680 static bool can_merge_remove_vma(struct vm_area_struct *vma) in can_merge_remove_vma() argument
682 return !vma->vm_ops || !vma->vm_ops->close; in can_merge_remove_vma()
716 struct vm_area_struct *vma = vmg->vma; in vma_merge_existing_range() local
723 bool left_side = vma && start == vma->vm_start; in vma_merge_existing_range()
724 bool right_side = vma && end == vma->vm_end; in vma_merge_existing_range()
732 VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */ in vma_merge_existing_range()
741 VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) || in vma_merge_existing_range()
742 vmg->end > vma->vm_end), vmg); in vma_merge_existing_range()
744 VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start && in vma_merge_existing_range()
745 vma_iter_addr(vmg->vmi) < vma->vm_end), vmg); in vma_merge_existing_range()
785 if (merge_will_delete_vma && !can_merge_remove_vma(vma)) in vma_merge_existing_range()
805 vma_start_write(vma); in vma_merge_existing_range()
821 vmg->vma = prev; in vma_merge_existing_range()
831 err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup); in vma_merge_existing_range()
841 vmg->vma = prev; in vma_merge_existing_range()
846 adjust = vma; in vma_merge_existing_range()
847 adj_start = vmg->end - vma->vm_start; in vma_merge_existing_range()
850 err = dup_anon_vma(prev, vma, &anon_dup); in vma_merge_existing_range()
864 VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg); in vma_merge_existing_range()
867 vmg->vma = next; in vma_merge_existing_range()
878 vmg->start = vma->vm_start; in vma_merge_existing_range()
880 vmg->pgoff = vma->vm_pgoff; in vma_merge_existing_range()
883 adj_start = -(vma->vm_end - start); in vma_merge_existing_range()
886 err = dup_anon_vma(next, vma, &anon_dup); in vma_merge_existing_range()
900 merge_will_delete_vma ? vma : NULL, in vma_merge_existing_range()
990 VM_WARN_ON_VMG(vmg->vma, vmg); in vma_merge_new_range()
1006 vmg->vma = next; in vma_merge_new_range()
1012 vmg->vma = prev; in vma_merge_new_range()
1034 if (vmg->vma && !vma_expand(vmg)) { in vma_merge_new_range()
1035 khugepaged_enter_vma(vmg->vma, vmg->flags); in vma_merge_new_range()
1037 return vmg->vma; in vma_merge_new_range()
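
vma_merge_new_range(), referenced above, decides whether a brand-new range should be absorbed into the VMA before it, after it, or both, before expanding via vma_expand(). The decision structure can be modelled in a few lines; region and compatible() below are hypothetical simplifications standing in for the real VMA type and the is_mergeable_vma() machinery.

/*
 * Toy classification of a new-range merge: with the previous region, the
 * next region, both, or neither. Model code only, not the kernel's API.
 */
#include <stdbool.h>
#include <stdio.h>

enum merge_kind { MERGE_NONE, MERGE_PREV, MERGE_NEXT, MERGE_BOTH };

struct region { unsigned long start, end, flags; };

static bool compatible(const struct region *a, const struct region *b)
{
	return a->flags == b->flags;	/* stand-in for the mergeability test */
}

static enum merge_kind classify(const struct region *prev,
				const struct region *next,
				const struct region *incoming)
{
	bool with_prev = prev && prev->end == incoming->start &&
			 compatible(prev, incoming);
	bool with_next = next && next->start == incoming->end &&
			 compatible(next, incoming);

	if (with_prev && with_next)
		return MERGE_BOTH;
	if (with_prev)
		return MERGE_PREV;
	if (with_next)
		return MERGE_NEXT;
	return MERGE_NONE;
}

int main(void)
{
	struct region prev     = { 0x1000, 0x2000, 3 };
	struct region next     = { 0x3000, 0x4000, 3 };
	struct region incoming = { 0x2000, 0x3000, 3 };

	printf("merge kind: %d\n", classify(&prev, &next, &incoming)); /* 3 = MERGE_BOTH */
	return 0;
}
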
1063 struct vm_area_struct *vma = vmg->vma; in vma_expand() local
1068 vma_start_write(vma); in vma_expand()
1069 if (next && (vma != next) && (vmg->end == next->vm_end)) { in vma_expand()
1076 ret = dup_anon_vma(vma, next, &anon_dup); in vma_expand()
1083 next != vma && vmg->end > next->vm_start, vmg); in vma_expand()
1085 VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg); in vma_expand()
1114 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, in vma_shrink() argument
1119 WARN_ON((vma->vm_start != start) && (vma->vm_end != end)); in vma_shrink()
1121 if (vma->vm_start < start) in vma_shrink()
1122 vma_iter_config(vmi, vma->vm_start, start); in vma_shrink()
1124 vma_iter_config(vmi, end, vma->vm_end); in vma_shrink()
1129 vma_start_write(vma); in vma_shrink()
1131 init_vma_prep(&vp, vma); in vma_shrink()
1133 vma_adjust_trans_huge(vma, start, end, 0); in vma_shrink()
1136 vma_set_range(vma, start, end, pgoff); in vma_shrink()
1137 vma_complete(&vp, vmi, vma->vm_mm); in vma_shrink()
1138 validate_mm(vma->vm_mm); in vma_shrink()
1155 tlb_gather_mmu(&tlb, vms->vma->vm_mm); in vms_clear_ptes()
1156 update_hiwater_rss(vms->vma->vm_mm); in vms_clear_ptes()
1157 unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, in vms_clear_ptes()
1162 free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start, in vms_clear_ptes()
1171 struct vm_area_struct *vma; in vms_clean_up_area() local
1178 mas_for_each(mas_detach, vma, ULONG_MAX) in vms_clean_up_area()
1179 vma_close(vma); in vms_clean_up_area()
1194 struct vm_area_struct *vma; in vms_complete_munmap_vmas() local
1221 mas_for_each(mas_detach, vma, ULONG_MAX) in vms_complete_munmap_vmas()
1222 remove_vma(vma, /* unreachable = */ false); in vms_complete_munmap_vmas()
1240 struct vm_area_struct *vma; in reattach_vmas() local
1243 mas_for_each(mas_detach, vma, ULONG_MAX) in reattach_vmas()
1244 vma_mark_detached(vma, false); in reattach_vmas()
1269 if (vms->start > vms->vma->vm_start) { in vms_gather_munmap_vmas()
1276 if (vms->end < vms->vma->vm_end && in vms_gather_munmap_vmas()
1277 vms->vma->vm_mm->map_count >= sysctl_max_map_count) { in vms_gather_munmap_vmas()
1283 if (!can_modify_vma(vms->vma)) { in vms_gather_munmap_vmas()
1288 error = __split_vma(vms->vmi, vms->vma, vms->start, 1); in vms_gather_munmap_vmas()
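
The vms_gather_munmap_vmas() lines above show the two boundary cases an unmap has to plan for: split the first VMA when the range starts inside it, and split the last VMA when the range ends inside it (the latter also checked against sysctl_max_map_count). A toy model of that planning step, with hypothetical names:

/*
 * Decide which splits an unmap of [start, end) needs for a VMA that
 * overlaps the range. Illustration only.
 */
#include <stdio.h>

struct span { unsigned long start, end; };

static void plan_unmap(const struct span *v, unsigned long start, unsigned long end)
{
	if (start > v->start)
		printf("split %lx-%lx at %lx, keep the low part\n",
		       v->start, v->end, start);
	if (end < v->end)
		printf("split %lx-%lx at %lx, keep the high part\n",
		       v->start, v->end, end);
}

int main(void)
{
	struct span v = { 0x1000, 0x9000 };

	plan_unmap(&v, 0x3000, 0x7000);	/* punches a hole: both splits needed */
	return 0;
}
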
1408 struct vma_iterator *vmi, struct vm_area_struct *vma, in init_vma_munmap() argument
1413 vms->vma = vma; in init_vma_munmap()
1414 if (vma) { in init_vma_munmap()
1444 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, in do_vmi_align_munmap() argument
1455 init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock); in do_vmi_align_munmap()
1496 struct vm_area_struct *vma; in do_vmi_munmap() local
1506 vma = vma_find(vmi, end); in do_vmi_munmap()
1507 if (!vma) { in do_vmi_munmap()
1513 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); in do_vmi_munmap()
1531 struct vm_area_struct *vma = vmg->vma; in vma_modify() local
1548 (vma->vm_start != start || vma->vm_end != end)); in vma_modify()
1551 if (vma->vm_start < start) { in vma_modify()
1552 int err = split_vma(vmg->vmi, vma, start, 1); in vma_modify()
1559 if (vma->vm_end > end) { in vma_modify()
1560 int err = split_vma(vmg->vmi, vma, end, 0); in vma_modify()
1566 return vma; in vma_modify()
1571 struct vm_area_struct *vma, unsigned long start, unsigned long end, in vma_modify_flags()
1574 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); in vma_modify_flags()
1584 struct vm_area_struct *vma, in vma_modify_flags_name() argument
1590 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); in vma_modify_flags_name()
1601 struct vm_area_struct *vma, in vma_modify_policy() argument
1605 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); in vma_modify_policy()
1615 struct vm_area_struct *vma, in vma_modify_flags_uffd() argument
1621 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); in vma_modify_flags_uffd()
1636 struct vm_area_struct *vma, in vma_merge_extend()
1639 VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); in vma_merge_extend()
1642 vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */ in vma_merge_extend()
1669 struct vm_area_struct *vma) in unlink_file_vma_batch_add() argument
1671 if (vma->vm_file == NULL) in unlink_file_vma_batch_add()
1674 if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) || in unlink_file_vma_batch_add()
1678 vb->vmas[vb->count] = vma; in unlink_file_vma_batch_add()
1692 void unlink_file_vma(struct vm_area_struct *vma) in unlink_file_vma() argument
1694 struct file *file = vma->vm_file; in unlink_file_vma()
1700 __remove_shared_vm_struct(vma, mapping); in unlink_file_vma()
1705 void vma_link_file(struct vm_area_struct *vma) in vma_link_file() argument
1707 struct file *file = vma->vm_file; in vma_link_file()
1713 __vma_link_file(vma, mapping); in vma_link_file()
1718 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) in vma_link() argument
1722 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); in vma_link()
1723 if (vma_iter_prealloc(&vmi, vma)) in vma_link()
1726 vma_start_write(vma); in vma_link()
1727 vma_iter_store(&vmi, vma); in vma_link()
1728 vma_link_file(vma); in vma_link()
1742 struct vm_area_struct *vma = *vmap; in copy_vma() local
1743 unsigned long vma_start = vma->vm_start; in copy_vma()
1744 struct mm_struct *mm = vma->vm_mm; in copy_vma()
1748 VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len); in copy_vma()
1754 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { in copy_vma()
1763 vmg.vma = NULL; /* New VMA range. */ in copy_vma()
1787 *vmap = vma = new_vma; in copy_vma()
1789 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); in copy_vma()
1791 new_vma = vm_area_dup(vma); in copy_vma()
1795 if (vma_dup_policy(vma, new_vma)) in copy_vma()
1797 if (anon_vma_clone(new_vma, vma)) in copy_vma()
1889 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) in find_mergeable_anon_vma() argument
1893 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end); in find_mergeable_anon_vma()
1898 anon_vma = reusable_anon_vma(next, vma, next); in find_mergeable_anon_vma()
1904 VM_BUG_ON_VMA(prev != vma, vma); in find_mergeable_anon_vma()
1908 anon_vma = reusable_anon_vma(prev, prev, vma); in find_mergeable_anon_vma()
1928 static bool vma_is_shared_writable(struct vm_area_struct *vma) in vma_is_shared_writable() argument
1930 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == in vma_is_shared_writable()
1934 static bool vma_fs_can_writeback(struct vm_area_struct *vma) in vma_fs_can_writeback() argument
1937 if (vma->vm_flags & VM_PFNMAP) in vma_fs_can_writeback()
1940 return vma->vm_file && vma->vm_file->f_mapping && in vma_fs_can_writeback()
1941 mapping_can_writeback(vma->vm_file->f_mapping); in vma_fs_can_writeback()
1948 bool vma_needs_dirty_tracking(struct vm_area_struct *vma) in vma_needs_dirty_tracking() argument
1951 if (!vma_is_shared_writable(vma)) in vma_needs_dirty_tracking()
1955 if (vm_ops_needs_writenotify(vma->vm_ops)) in vma_needs_dirty_tracking()
1962 return vma_fs_can_writeback(vma); in vma_needs_dirty_tracking()
1971 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) in vma_wants_writenotify() argument
1974 if (!vma_is_shared_writable(vma)) in vma_wants_writenotify()
1978 if (vm_ops_needs_writenotify(vma->vm_ops)) in vma_wants_writenotify()
1984 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) in vma_wants_writenotify()
1991 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) in vma_wants_writenotify()
1995 if (userfaultfd_wp(vma)) in vma_wants_writenotify()
1999 return vma_fs_can_writeback(vma); in vma_wants_writenotify()
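
The vma_is_shared_writable() / vma_needs_dirty_tracking() / vma_wants_writenotify() group above hinges on one mask test: a mapping is interesting for write-notify and dirty tracking only when it is both VM_WRITE and VM_SHARED. A sketch of that idiom follows; the flag values are illustrative constants, not taken from the kernel headers.

/*
 * Both bits must be set; the single compare against the combined mask
 * mirrors the check referenced above.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE  0x2UL		/* illustrative values */
#define VM_SHARED 0x8UL

static bool is_shared_writable(unsigned long vm_flags)
{
	return (vm_flags & (VM_WRITE | VM_SHARED)) == (VM_WRITE | VM_SHARED);
}

int main(void)
{
	printf("%d %d %d\n",
	       is_shared_writable(VM_WRITE),			/* 0: private writable */
	       is_shared_writable(VM_SHARED),			/* 0: shared read-only */
	       is_shared_writable(VM_WRITE | VM_SHARED));	/* 1 */
	return 0;
}
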
2085 struct vm_area_struct *vma; in mm_take_all_locks() local
2099 for_each_vma(vmi, vma) { in mm_take_all_locks()
2102 vma_start_write(vma); in mm_take_all_locks()
2106 for_each_vma(vmi, vma) { in mm_take_all_locks()
2109 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
2110 is_vm_hugetlb_page(vma)) in mm_take_all_locks()
2111 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
2115 for_each_vma(vmi, vma) { in mm_take_all_locks()
2118 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
2119 !is_vm_hugetlb_page(vma)) in mm_take_all_locks()
2120 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
2124 for_each_vma(vmi, vma) { in mm_take_all_locks()
2127 if (vma->anon_vma) in mm_take_all_locks()
2128 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_take_all_locks()
2181 struct vm_area_struct *vma; in mm_drop_all_locks() local
2188 for_each_vma(vmi, vma) { in mm_drop_all_locks()
2189 if (vma->anon_vma) in mm_drop_all_locks()
2190 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_drop_all_locks()
2192 if (vma->vm_file && vma->vm_file->f_mapping) in mm_drop_all_locks()
2193 vm_unlock_mapping(vma->vm_file->f_mapping); in mm_drop_all_locks()
2265 vms->vma = vma_find(vmi, map->end); in __mmap_prepare()
2266 init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf, in __mmap_prepare()
2270 if (vms->vma) { in __mmap_prepare()
2320 struct vm_area_struct *vma) in __mmap_new_file_vma() argument
2325 vma->vm_file = get_file(map->file); in __mmap_new_file_vma()
2326 error = mmap_file(vma->vm_file, vma); in __mmap_new_file_vma()
2328 fput(vma->vm_file); in __mmap_new_file_vma()
2329 vma->vm_file = NULL; in __mmap_new_file_vma()
2331 vma_iter_set(vmi, vma->vm_end); in __mmap_new_file_vma()
2333 unmap_region(&vmi->mas, vma, map->prev, map->next); in __mmap_new_file_vma()
2339 WARN_ON_ONCE(map->addr != vma->vm_start); in __mmap_new_file_vma()
2344 VM_WARN_ON_ONCE(map->flags != vma->vm_flags && in __mmap_new_file_vma()
2346 (vma->vm_flags & VM_MAYWRITE)); in __mmap_new_file_vma()
2349 map->retry_merge = vma->vm_flags != map->flags && !(vma->vm_flags & VM_SPECIAL); in __mmap_new_file_vma()
2350 map->flags = vma->vm_flags; in __mmap_new_file_vma()
2368 struct vm_area_struct *vma; in __mmap_new_vma() local
2375 vma = vm_area_alloc(map->mm); in __mmap_new_vma()
2376 if (!vma) in __mmap_new_vma()
2380 vma_set_range(vma, map->addr, map->end, map->pgoff); in __mmap_new_vma()
2381 vm_flags_init(vma, map->flags); in __mmap_new_vma()
2382 vma->vm_page_prot = vm_get_page_prot(map->flags); in __mmap_new_vma()
2384 if (vma_iter_prealloc(vmi, vma)) { in __mmap_new_vma()
2390 error = __mmap_new_file_vma(map, vma); in __mmap_new_vma()
2392 error = shmem_zero_setup(vma); in __mmap_new_vma()
2394 vma_set_anonymous(vma); in __mmap_new_vma()
2405 vma_start_write(vma); in __mmap_new_vma()
2406 vma_iter_store(vmi, vma); in __mmap_new_vma()
2408 vma_link_file(vma); in __mmap_new_vma()
2414 if (!vma_is_anonymous(vma)) in __mmap_new_vma()
2415 khugepaged_enter_vma(vma, map->flags); in __mmap_new_vma()
2416 ksm_add_vma(vma); in __mmap_new_vma()
2417 *vmap = vma; in __mmap_new_vma()
2423 vm_area_free(vma); in __mmap_new_vma()
2434 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma) in __mmap_complete() argument
2437 unsigned long vm_flags = vma->vm_flags; in __mmap_complete()
2439 perf_event_mmap(vma); in __mmap_complete()
2444 vm_stat_account(mm, vma->vm_flags, map->pglen); in __mmap_complete()
2446 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || in __mmap_complete()
2447 is_vm_hugetlb_page(vma) || in __mmap_complete()
2448 vma == get_gate_vma(mm)) in __mmap_complete()
2449 vm_flags_clear(vma, VM_LOCKED_MASK); in __mmap_complete()
2454 if (vma->vm_file) in __mmap_complete()
2455 uprobe_mmap(vma); in __mmap_complete()
2464 vm_flags_set(vma, VM_SOFTDIRTY); in __mmap_complete()
2466 vma_set_page_prot(vma); in __mmap_complete()
2474 struct vm_area_struct *vma = NULL; in __mmap_region() local
2487 vma = vma_merge_new_range(&vmg); in __mmap_region()
2491 if (!vma) { in __mmap_region()
2492 error = __mmap_new_vma(&map, &vma); in __mmap_region()
2500 VMG_MMAP_STATE(vmg, &map, vma); in __mmap_region()
2505 vma = merged; in __mmap_region()
2508 __mmap_complete(&map, vma); in __mmap_region()
2592 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, in do_brk_flags() argument
2615 if (vma && vma->vm_end == addr) { in do_brk_flags()
2618 vmg.prev = vma; in do_brk_flags()
2628 if (vma) in do_brk_flags()
2631 vma = vm_area_alloc(mm); in do_brk_flags()
2632 if (!vma) in do_brk_flags()
2635 vma_set_anonymous(vma); in do_brk_flags()
2636 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); in do_brk_flags()
2637 vm_flags_init(vma, flags); in do_brk_flags()
2638 vma->vm_page_prot = vm_get_page_prot(flags); in do_brk_flags()
2639 vma_start_write(vma); in do_brk_flags()
2640 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) in do_brk_flags()
2645 ksm_add_vma(vma); in do_brk_flags()
2647 perf_event_mmap(vma); in do_brk_flags()
2652 vm_flags_set(vma, VM_SOFTDIRTY); in do_brk_flags()
2656 vm_area_free(vma); in do_brk_flags()
2776 static int acct_stack_growth(struct vm_area_struct *vma, in acct_stack_growth() argument
2779 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2783 if (!may_expand_vm(mm, vma->vm_flags, grow)) in acct_stack_growth()
2791 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) in acct_stack_growth()
2795 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : in acct_stack_growth()
2796 vma->vm_end - size; in acct_stack_growth()
2797 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2815 int expand_upwards(struct vm_area_struct *vma, unsigned long address) in expand_upwards() argument
2817 struct mm_struct *mm = vma->vm_mm; in expand_upwards()
2821 VMA_ITERATOR(vmi, mm, vma->vm_start); in expand_upwards()
2823 if (!(vma->vm_flags & VM_GROWSUP)) in expand_upwards()
2841 next = find_vma_intersection(mm, vma->vm_end, gap_addr); in expand_upwards()
2851 vma_iter_config(&vmi, vma->vm_start, address); in expand_upwards()
2852 if (vma_iter_prealloc(&vmi, vma)) in expand_upwards()
2856 if (unlikely(anon_vma_prepare(vma))) { in expand_upwards()
2862 vma_start_write(vma); in expand_upwards()
2864 anon_vma_lock_write(vma->anon_vma); in expand_upwards()
2867 if (address > vma->vm_end) { in expand_upwards()
2870 size = address - vma->vm_start; in expand_upwards()
2871 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
2874 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { in expand_upwards()
2875 error = acct_stack_growth(vma, size, grow); in expand_upwards()
2877 if (vma->vm_flags & VM_LOCKED) in expand_upwards()
2879 vm_stat_account(mm, vma->vm_flags, grow); in expand_upwards()
2880 anon_vma_interval_tree_pre_update_vma(vma); in expand_upwards()
2881 vma->vm_end = address; in expand_upwards()
2883 vma_iter_store(&vmi, vma); in expand_upwards()
2884 anon_vma_interval_tree_post_update_vma(vma); in expand_upwards()
2886 perf_event_mmap(vma); in expand_upwards()
2890 anon_vma_unlock_write(vma->anon_vma); in expand_upwards()
2901 int expand_downwards(struct vm_area_struct *vma, unsigned long address) in expand_downwards() argument
2903 struct mm_struct *mm = vma->vm_mm; in expand_downwards()
2906 VMA_ITERATOR(vmi, mm, vma->vm_start); in expand_downwards()
2908 if (!(vma->vm_flags & VM_GROWSDOWN)) in expand_downwards()
2928 vma_iter_next_range_limit(&vmi, vma->vm_start); in expand_downwards()
2930 vma_iter_config(&vmi, address, vma->vm_end); in expand_downwards()
2931 if (vma_iter_prealloc(&vmi, vma)) in expand_downwards()
2935 if (unlikely(anon_vma_prepare(vma))) { in expand_downwards()
2941 vma_start_write(vma); in expand_downwards()
2943 anon_vma_lock_write(vma->anon_vma); in expand_downwards()
2946 if (address < vma->vm_start) { in expand_downwards()
2949 size = vma->vm_end - address; in expand_downwards()
2950 grow = (vma->vm_start - address) >> PAGE_SHIFT; in expand_downwards()
2953 if (grow <= vma->vm_pgoff) { in expand_downwards()
2954 error = acct_stack_growth(vma, size, grow); in expand_downwards()
2956 if (vma->vm_flags & VM_LOCKED) in expand_downwards()
2958 vm_stat_account(mm, vma->vm_flags, grow); in expand_downwards()
2959 anon_vma_interval_tree_pre_update_vma(vma); in expand_downwards()
2960 vma->vm_start = address; in expand_downwards()
2961 vma->vm_pgoff -= grow; in expand_downwards()
2963 vma_iter_store(&vmi, vma); in expand_downwards()
2964 anon_vma_interval_tree_post_update_vma(vma); in expand_downwards()
2966 perf_event_mmap(vma); in expand_downwards()
2970 anon_vma_unlock_write(vma->anon_vma); in expand_downwards()
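
expand_upwards() and expand_downwards(), referenced above, both turn the faulting address into a page-granular grow count and guard the vm_pgoff arithmetic against wrap-around (the "vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff" and "grow <= vma->vm_pgoff" checks in the listing). A simplified userspace model of the downward case, under assumed types:

/*
 * Grow a stack-like region downwards by whole pages, refusing the growth
 * if the page offset would underflow. Model code, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct stack_vma {
	unsigned long vm_start, vm_end, vm_pgoff;
};

static bool grow_down(struct stack_vma *v, unsigned long address)
{
	unsigned long grow = (v->vm_start - address) >> PAGE_SHIFT;

	if (grow > v->vm_pgoff)		/* would underflow the offset */
		return false;
	v->vm_start = address;
	v->vm_pgoff -= grow;
	return true;
}

int main(void)
{
	struct stack_vma v = { 0x100000, 0x121000, 0x10 };
	bool ok = grow_down(&v, v.vm_start - (4UL << PAGE_SHIFT));

	printf("grown: %d, start=%lx pgoff=%lx\n", ok, v.vm_start, v.vm_pgoff);
	return 0;
}
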