Lines matching refs:vma — every line in the kernel's mremap implementation (mm/mremap.c) that references the vma identifier. Each entry gives the source line number, the line itself, and the enclosing function; "argument" and "local" mark lines where vma is declared as a function parameter or a local variable.

72 static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,  in alloc_new_pud()  argument
86 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
92 pud = alloc_new_pud(mm, vma, addr); in alloc_new_pmd()
105 static void take_rmap_locks(struct vm_area_struct *vma) in take_rmap_locks() argument
107 if (vma->vm_file) in take_rmap_locks()
108 i_mmap_lock_write(vma->vm_file->f_mapping); in take_rmap_locks()
109 if (vma->anon_vma) in take_rmap_locks()
110 anon_vma_lock_write(vma->anon_vma); in take_rmap_locks()
113 static void drop_rmap_locks(struct vm_area_struct *vma) in drop_rmap_locks() argument
115 if (vma->anon_vma) in drop_rmap_locks()
116 anon_vma_unlock_write(vma->anon_vma); in drop_rmap_locks()
117 if (vma->vm_file) in drop_rmap_locks()
118 i_mmap_unlock_write(vma->vm_file->f_mapping); in drop_rmap_locks()
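
The fragments at lines 105-118 are essentially the complete bodies of the two rmap-lock helpers. Assembled into whole functions for readability (a sketch reconstructed from the lines above, with added comments; not quoted from any particular tree):

static void take_rmap_locks(struct vm_area_struct *vma)
{
	/* File-backed side first: the i_mmap rwsem of the backing file. */
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	/* Then the anon_vma rwsem, if the VMA has anonymous pages. */
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	/* Released in the reverse order of take_rmap_locks(). */
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}
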
136 static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, in move_ptes() argument
141 bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma); in move_ptes()
142 struct mm_struct *mm = vma->vm_mm; in move_ptes()
169 take_rmap_locks(vma); in move_ptes()
196 flush_tlb_batched_pending(vma->vm_mm); in move_ptes()
236 flush_tlb_range(vma, old_end - len, old_end); in move_ptes()
243 drop_rmap_locks(vma); in move_ptes()
257 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, in move_normal_pmd() argument
261 struct mm_struct *mm = vma->vm_mm; in move_normal_pmd()
299 if (vma_has_uffd_without_event_remap(vma)) in move_normal_pmd()
306 old_ptl = pmd_lock(vma->vm_mm, old_pmd); in move_normal_pmd()
323 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); in move_normal_pmd()
332 static inline bool move_normal_pmd(struct vm_area_struct *vma, in move_normal_pmd() argument
341 static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, in move_normal_pud() argument
345 struct mm_struct *mm = vma->vm_mm; in move_normal_pud()
363 if (vma_has_uffd_without_event_remap(vma)) in move_normal_pud()
370 old_ptl = pud_lock(vma->vm_mm, old_pud); in move_normal_pud()
382 flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE); in move_normal_pud()
390 static inline bool move_normal_pud(struct vm_area_struct *vma, in move_normal_pud() argument
399 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pud() argument
403 struct mm_struct *mm = vma->vm_mm; in move_huge_pud()
417 old_ptl = pud_lock(vma->vm_mm, old_pud); in move_huge_pud()
431 flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE); in move_huge_pud()
439 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pud() argument
497 static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, in move_pgt_entry() argument
505 take_rmap_locks(vma); in move_pgt_entry()
509 moved = move_normal_pmd(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
513 moved = move_normal_pud(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
518 move_huge_pmd(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
523 move_huge_pud(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
533 drop_rmap_locks(vma); in move_pgt_entry()
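
Lines 497-533 outline move_pgt_entry() as a dispatcher: optionally take the rmap locks, switch on which kind of page-table entry is being moved, then drop the locks. A hedged reconstruction of that shape follows; the need_rmap_locks parameter, the moved local, the IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) guards and the default case are inferred rather than taken from the listing (the NORMAL_*/HPAGE_* enum values come from the callers shown under move_page_tables() below):

static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			   unsigned long old_addr, unsigned long new_addr,
			   void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}
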
544 static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align, in can_align_down() argument
554 if (!for_stack && vma->vm_start != addr_to_align) in can_align_down()
558 if (for_stack && addr_masked >= vma->vm_start) in can_align_down()
565 return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL; in can_align_down()
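
Lines 544-565 show the three checks in can_align_down() that decide whether old_addr/new_addr may be rounded down to a page-table boundary. A sketch of how they plausibly combine (the addr_masked computation and the early-return values are inferred from context, not listed):

static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
			   unsigned long mask, bool for_stack)
{
	unsigned long addr_masked = addr_to_align & mask;

	/* Rounding down must not chop off the front of the VMA... */
	if (!for_stack && vma->vm_start != addr_to_align)
		return false;

	/* ...except for a stack, where staying inside the VMA is enough. */
	if (for_stack && addr_masked >= vma->vm_start)
		return true;

	/* The realigned range [addr_masked, vm_start) must not hit another mapping. */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}
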
590 unsigned long move_page_tables(struct vm_area_struct *vma, in move_page_tables() argument
605 if (is_vm_hugetlb_page(vma)) in move_page_tables()
606 return move_hugetlb_page_tables(vma, new_vma, old_addr, in move_page_tables()
614 try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK, in move_page_tables()
617 flush_cache_range(vma, old_addr, old_end); in move_page_tables()
618 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, in move_page_tables()
630 old_pud = get_old_pud(vma->vm_mm, old_addr); in move_page_tables()
633 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); in move_page_tables()
638 move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr, in move_page_tables()
645 if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, in move_page_tables()
651 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
654 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
661 move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr, in move_page_tables()
664 split_huge_pmd(vma, old_pmd, old_addr); in move_page_tables()
671 if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr, in move_page_tables()
679 if (move_ptes(vma, old_pmd, old_addr, old_addr + extent, in move_page_tables()
696 static unsigned long move_vma(struct vm_area_struct *vma, in move_vma() argument
703 struct mm_struct *mm = vma->vm_mm; in move_vma()
705 unsigned long vm_flags = vma->vm_flags; in move_vma()
725 if (vma->vm_ops && vma->vm_ops->may_split) { in move_vma()
726 if (vma->vm_start != old_addr) in move_vma()
727 err = vma->vm_ops->may_split(vma, old_addr); in move_vma()
728 if (!err && vma->vm_end != old_addr + old_len) in move_vma()
729 err = vma->vm_ops->may_split(vma, old_addr + old_len); in move_vma()
741 err = ksm_madvise(vma, old_addr, old_addr + old_len, in move_vma()
751 vma_start_write(vma); in move_vma()
752 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); in move_vma()
753 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, in move_vma()
761 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, in move_vma()
765 } else if (vma->vm_ops && vma->vm_ops->mremap) { in move_vma()
766 err = vma->vm_ops->mremap(new_vma); in move_vma()
775 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, in move_vma()
777 vma = new_vma; in move_vma()
785 if (is_vm_hugetlb_page(vma)) { in move_vma()
786 clear_vma_resv_huge_pages(vma); in move_vma()
791 vm_flags_clear(vma, VM_ACCOUNT); in move_vma()
792 if (vma->vm_start < old_addr) in move_vma()
794 if (vma->vm_end > old_addr + old_len) in move_vma()
808 vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT); in move_vma()
811 if (unlikely(vma->vm_flags & VM_PFNMAP)) in move_vma()
812 untrack_pfn_clear(vma); in move_vma()
816 vm_flags_clear(vma, VM_LOCKED_MASK); in move_vma()
822 if (new_vma != vma && vma->vm_start == old_addr && in move_vma()
823 vma->vm_end == (old_addr + old_len)) in move_vma()
824 unlink_anon_vmas(vma); in move_vma()
847 vma = vma_prev(&vmi); in move_vma()
848 vm_flags_set(vma, VM_ACCOUNT); in move_vma()
852 vma = vma_next(&vmi); in move_vma()
853 vm_flags_set(vma, VM_ACCOUNT); in move_vma()
871 static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr, in resize_is_valid() argument
885 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { in resize_is_valid()
891 (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))) in resize_is_valid()
895 if (old_len > vma->vm_end - addr) in resize_is_valid()
902 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; in resize_is_valid()
903 pgoff += vma->vm_pgoff; in resize_is_valid()
907 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) in resize_is_valid()
910 if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len)) in resize_is_valid()
913 if (!may_expand_vm(mm, vma->vm_flags, in resize_is_valid()
941 struct vm_area_struct *vma; in mremap_to() local
990 vma = vma_lookup(mm, addr); in mremap_to()
991 if (!vma) in mremap_to()
994 ret = resize_is_valid(vma, addr, old_len, new_len, flags); in mremap_to()
1000 !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) { in mremap_to()
1007 if (vma->vm_flags & VM_MAYSHARE) in mremap_to()
1010 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + in mremap_to()
1011 ((addr - vma->vm_start) >> PAGE_SHIFT), in mremap_to()
1020 return move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, in mremap_to()
1024 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) in vma_expandable() argument
1026 unsigned long end = vma->vm_end + delta; in vma_expandable()
1028 if (end < vma->vm_end) /* overflow */ in vma_expandable()
1030 if (find_vma_intersection(vma->vm_mm, vma->vm_end, end)) in vma_expandable()
1032 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, in vma_expandable()
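
vma_expandable() (lines 1024-1032) is nearly spelled out by the listing: the VMA can grow in place only if the new end does not overflow, nothing else is mapped between the current end and the new one, and get_unmapped_area() agrees the enlarged range is usable. A reconstruction under those assumptions (the MAP_FIXED argument, the ~PAGE_MASK check and the 0/1 return values are inferred):

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end)			/* overflow */
		return 0;
	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
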
1050 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
1101 vma = vma_lookup(mm, addr); in SYSCALL_DEFINE5()
1102 if (!vma) { in SYSCALL_DEFINE5()
1108 if (!can_modify_vma(vma)) { in SYSCALL_DEFINE5()
1113 if (is_vm_hugetlb_page(vma)) { in SYSCALL_DEFINE5()
1114 struct hstate *h __maybe_unused = hstate_vma(vma); in SYSCALL_DEFINE5()
1166 ret = resize_is_valid(vma, addr, old_len, new_len, flags); in SYSCALL_DEFINE5()
1172 if (old_len == vma->vm_end - addr) { in SYSCALL_DEFINE5()
1176 if (vma_expandable(vma, delta)) { in SYSCALL_DEFINE5()
1178 VMA_ITERATOR(vmi, mm, vma->vm_end); in SYSCALL_DEFINE5()
1181 if (vma->vm_flags & VM_ACCOUNT) { in SYSCALL_DEFINE5()
1198 vma = vma_merge_extend(&vmi, vma, delta); in SYSCALL_DEFINE5()
1199 if (!vma) { in SYSCALL_DEFINE5()
1205 vm_stat_account(mm, vma->vm_flags, pages); in SYSCALL_DEFINE5()
1206 if (vma->vm_flags & VM_LOCKED) { in SYSCALL_DEFINE5()
1223 if (vma->vm_flags & VM_MAYSHARE) in SYSCALL_DEFINE5()
1226 new_addr = get_unmapped_area(vma->vm_file, 0, new_len, in SYSCALL_DEFINE5()
1227 vma->vm_pgoff + in SYSCALL_DEFINE5()
1228 ((addr - vma->vm_start) >> PAGE_SHIFT), in SYSCALL_DEFINE5()
1235 ret = move_vma(vma, addr, old_len, new_len, new_addr, in SYSCALL_DEFINE5()
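
The SYSCALL_DEFINE5 references above are the kernel side of mremap(2). For context, a minimal userspace program that exercises this path is sketched below: it grows an anonymous mapping with MREMAP_MAYMOVE, so the kernel may relocate it and thereby go through move_vma()/move_page_tables(). This is ordinary man-page usage, not taken from the listing:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 1024 * 1024;

	/* Anonymous mapping that we will ask the kernel to resize. */
	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "hello");

	/* MREMAP_MAYMOVE lets the kernel pick a new address if it cannot expand in place. */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}

	/* The contents follow the mapping to its (possibly) new location. */
	printf("old=%p new=%p contents=\"%s\"\n", (void *)p, (void *)q, q);

	munmap(q, new_len);
	return 0;
}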