Lines in mm/madvise.c matching references to vma
102 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) in anon_vma_name() argument
104 mmap_assert_locked(vma->vm_mm); in anon_vma_name()
106 return vma->anon_name; in anon_vma_name()
110 static int replace_anon_vma_name(struct vm_area_struct *vma, in replace_anon_vma_name() argument
113 struct anon_vma_name *orig_name = anon_vma_name(vma); in replace_anon_vma_name()
116 vma->anon_name = NULL; in replace_anon_vma_name()
124 vma->anon_name = anon_vma_name_reuse(anon_name); in replace_anon_vma_name()
130 static int replace_anon_vma_name(struct vm_area_struct *vma, in replace_anon_vma_name() argument
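
The CONFIG_ANON_VMA_NAME build (lines 102-124 above) stores a refcounted name on the VMA: replace_anon_vma_name() takes a shared reference on the incoming name via anon_vma_name_reuse() and drops the reference on the old one; the stub at line 130 covers kernels built without the option. A minimal sketch of that reference-swap pattern, with name_ref/name_get/name_put as hypothetical stand-ins for the kernel's kref-based helpers:

    #include <stdlib.h>

    /* Hypothetical stand-in for the kernel's refcounted struct anon_vma_name. */
    struct name_ref {
        int refcount;
        char name[64];
    };

    static struct name_ref *name_get(struct name_ref *n)
    {
        if (n)
            n->refcount++;
        return n;
    }

    static void name_put(struct name_ref *n)
    {
        if (n && --n->refcount == 0)
            free(n);
    }

    /* Mirrors the shape of replace_anon_vma_name(): publish a shared
     * reference to the new name, then drop the reference on the old one. */
    static void replace_name(struct name_ref **slot, struct name_ref *new_name)
    {
        struct name_ref *old = *slot;

        *slot = name_get(new_name);   /* new_name may be NULL to clear */
        name_put(old);
    }
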
145 static int madvise_update_vma(struct vm_area_struct *vma, in madvise_update_vma() argument
150 struct mm_struct *mm = vma->vm_mm; in madvise_update_vma()
154 if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) { in madvise_update_vma()
155 *prev = vma; in madvise_update_vma()
159 vma = vma_modify_flags_name(&vmi, *prev, vma, start, end, new_flags, in madvise_update_vma()
161 if (IS_ERR(vma)) in madvise_update_vma()
162 return PTR_ERR(vma); in madvise_update_vma()
164 *prev = vma; in madvise_update_vma()
167 vma_start_write(vma); in madvise_update_vma()
168 vm_flags_reset(vma, new_flags); in madvise_update_vma()
169 if (!vma->vm_file || vma_is_anon_shmem(vma)) { in madvise_update_vma()
170 error = replace_anon_vma_name(vma, anon_name); in madvise_update_vma()
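
madvise_update_vma() is also the path taken when userspace names an anonymous mapping; per line 169, the name is only attached to anonymous or anon-shmem VMAs. A minimal user of that interface (requires a kernel built with CONFIG_ANON_VMA_NAME):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>    /* PR_SET_VMA, PR_SET_VMA_ANON_NAME */

    int main(void)
    {
        size_t len = 4 * 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;

        /* The range then shows up as "[anon:ring buffer]" in /proc/self/maps. */
        if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                  (unsigned long)p, len, (unsigned long)"ring buffer"))
            perror("prctl");

        return 0;
    }
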
182 struct vm_area_struct *vma = walk->private; in swapin_walk_pmd_entry() local
194 ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in swapin_walk_pmd_entry()
210 vma, addr, &splug); in swapin_walk_pmd_entry()
228 static void shmem_swapin_range(struct vm_area_struct *vma, in shmem_swapin_range() argument
232 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); in shmem_swapin_range()
233 pgoff_t end_index = linear_page_index(vma, end) - 1; in shmem_swapin_range()
249 addr = vma->vm_start + in shmem_swapin_range()
250 ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT); in shmem_swapin_range()
255 vma, addr, &splug); in shmem_swapin_range()
269 static long madvise_willneed(struct vm_area_struct *vma, in madvise_willneed() argument
273 struct mm_struct *mm = vma->vm_mm; in madvise_willneed()
274 struct file *file = vma->vm_file; in madvise_willneed()
277 *prev = vma; in madvise_willneed()
280 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); in madvise_willneed()
286 shmem_swapin_range(vma, start, end, file->f_mapping); in madvise_willneed()
308 offset = (loff_t)(start - vma->vm_start) in madvise_willneed()
309 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_willneed()
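
madvise_willneed() prefetches in three ways: swapin_walk_pmd_entry() swaps anonymous pages back in, shmem_swapin_range() does the same for tmpfs mappings, and file-backed VMAs fall through to readahead at the file offset computed on lines 308-309. A typical caller, warming a file mapping before first use:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* Map a file and hint that it will be needed soon; the kernel starts
     * asynchronous readahead and the call returns immediately. */
    void *map_and_prefetch(const char *path, size_t *out_len)
    {
        struct stat st;
        void *p;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
            return NULL;
        if (fstat(fd, &st) < 0 || st.st_size == 0) {
            close(fd);
            return NULL;
        }

        p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        close(fd);              /* the mapping keeps the inode pinned */
        if (p == MAP_FAILED)
            return NULL;

        madvise(p, st.st_size, MADV_WILLNEED);
        *out_len = st.st_size;
        return p;
    }
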
317 static inline bool can_do_file_pageout(struct vm_area_struct *vma) in can_do_file_pageout() argument
319 if (!vma->vm_file) in can_do_file_pageout()
328 file_inode(vma->vm_file)) || in can_do_file_pageout()
329 file_permission(vma->vm_file, MAY_WRITE) == 0; in can_do_file_pageout()
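
can_do_file_pageout() gates reclaim of file-backed memory: the caller needs CAP_SYS_ADMIN in the inode's user namespace, ownership of the file, or write permission, so unrelated tasks cannot use MADV_PAGEOUT as a cross-process page-cache eviction side channel. A crude userspace rendering of that policy (euid and mode-bit checks are rough stand-ins for the kernel's namespace-aware capability and file_permission() tests):

    #include <stdbool.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static bool may_pageout_file(const struct stat *st)
    {
        return geteuid() == 0 ||               /* ~ CAP_SYS_ADMIN */
               geteuid() == st->st_uid ||      /* file owner */
               (st->st_mode & S_IWOTH);        /* crude MAY_WRITE stand-in */
    }
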
352 struct vm_area_struct *vma = walk->vma; in madvise_cold_or_pageout_pte_range() local
364 pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) && in madvise_cold_or_pageout_pte_range()
365 !can_do_file_pageout(vma); in madvise_cold_or_pageout_pte_range()
373 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_cold_or_pageout_pte_range()
411 pmdp_invalidate(vma, addr, pmd); in madvise_cold_or_pageout_pte_range()
442 start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
467 folio = vm_normal_folio(vma, addr, ptent); in madvise_cold_or_pageout_pte_range()
527 clear_young_dirty_ptes(vma, addr, pte, nr, in madvise_cold_or_pageout_pte_range()
570 struct vm_area_struct *vma, in madvise_cold_page_range() argument
578 tlb_start_vma(tlb, vma); in madvise_cold_page_range()
579 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_cold_page_range()
580 tlb_end_vma(tlb, vma); in madvise_cold_page_range()
583 static inline bool can_madv_lru_vma(struct vm_area_struct *vma) in can_madv_lru_vma() argument
585 return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB)); in can_madv_lru_vma()
588 static long madvise_cold(struct vm_area_struct *vma, in madvise_cold() argument
592 struct mm_struct *mm = vma->vm_mm; in madvise_cold()
595 *prev = vma; in madvise_cold()
596 if (!can_madv_lru_vma(vma)) in madvise_cold()
601 madvise_cold_page_range(&tlb, vma, start_addr, end_addr); in madvise_cold()
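
madvise_cold() deactivates the range after can_madv_lru_vma() rejects VM_LOCKED, VM_PFNMAP and VM_HUGETLB mappings: pages move toward the inactive LRU and become preferred reclaim victims while keeping their contents. From userspace (the constant may be missing from pre-2.31 glibc headers):

    #include <sys/mman.h>

    #ifndef MADV_COLD
    #define MADV_COLD 20        /* Linux 5.4+ */
    #endif

    /* Demote a range on the LRU: contents are preserved, but the pages
     * become first in line for reclaim under memory pressure. */
    static inline int make_cold(void *addr, size_t len)
    {
        return madvise(addr, len, MADV_COLD);
    }
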
608 struct vm_area_struct *vma, in madvise_pageout_page_range() argument
616 tlb_start_vma(tlb, vma); in madvise_pageout_page_range()
617 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_pageout_page_range()
618 tlb_end_vma(tlb, vma); in madvise_pageout_page_range()
621 static long madvise_pageout(struct vm_area_struct *vma, in madvise_pageout() argument
625 struct mm_struct *mm = vma->vm_mm; in madvise_pageout()
628 *prev = vma; in madvise_pageout()
629 if (!can_madv_lru_vma(vma)) in madvise_pageout()
638 if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) && in madvise_pageout()
639 (vma->vm_flags & VM_MAYSHARE))) in madvise_pageout()
644 madvise_pageout_page_range(&tlb, vma, start_addr, end_addr); in madvise_pageout()
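
madvise_pageout() reuses the same walk but reclaims immediately; note the permission gate on lines 638-639, which quietly skips shared file-backed VMAs the caller has no pageout rights over. From userspace:

    #include <sys/mman.h>

    #ifndef MADV_PAGEOUT
    #define MADV_PAGEOUT 21     /* Linux 5.4+ */
    #endif

    /* Ask the kernel to reclaim the range now: dirty pages are written
     * to swap or their backing store, then dropped from memory. */
    static inline int reclaim_now(void *addr, size_t len)
    {
        return madvise(addr, len, MADV_PAGEOUT);
    }
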
657 struct vm_area_struct *vma = walk->vma; in madvise_free_pte_range() local
667 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) in madvise_free_pte_range()
704 folio = vm_normal_folio(vma, addr, ptent); in madvise_free_pte_range()
775 clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags); in madvise_free_pte_range()
797 static int madvise_free_single_vma(struct vm_area_struct *vma, in madvise_free_single_vma() argument
800 struct mm_struct *mm = vma->vm_mm; in madvise_free_single_vma()
805 if (!vma_is_anonymous(vma)) in madvise_free_single_vma()
808 range.start = max(vma->vm_start, start_addr); in madvise_free_single_vma()
809 if (range.start >= vma->vm_end) in madvise_free_single_vma()
811 range.end = min(vma->vm_end, end_addr); in madvise_free_single_vma()
812 if (range.end <= vma->vm_start) in madvise_free_single_vma()
822 tlb_start_vma(&tlb, vma); in madvise_free_single_vma()
823 walk_page_range(vma->vm_mm, range.start, range.end, in madvise_free_single_vma()
825 tlb_end_vma(&tlb, vma); in madvise_free_single_vma()
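
madvise_free_single_vma() only accepts anonymous VMAs (line 805) and clamps the notifier range to the VMA (lines 808-812) before walking it. From userspace, MADV_FREE is the lazy variant of MADV_DONTNEED:

    #include <sys/mman.h>

    /* Lazy free: the kernel may discard these anonymous pages at any
     * point under pressure; a later write revives the page, and reads
     * before reclaim may still see the old data. */
    static inline int lazy_free(void *addr, size_t len)
    {
        return madvise(addr, len, MADV_FREE);   /* anonymous memory only */
    }
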
851 static long madvise_dontneed_single_vma(struct vm_area_struct *vma, in madvise_dontneed_single_vma() argument
859 zap_page_range_single(vma, start, end - start, &details); in madvise_dontneed_single_vma()
863 static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma, in madvise_dontneed_free_valid_vma() argument
868 if (!is_vm_hugetlb_page(vma)) { in madvise_dontneed_free_valid_vma()
874 return !(vma->vm_flags & forbidden); in madvise_dontneed_free_valid_vma()
879 if (start & ~huge_page_mask(hstate_vma(vma))) in madvise_dontneed_free_valid_vma()
888 *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma))); in madvise_dontneed_free_valid_vma()
893 static long madvise_dontneed_free(struct vm_area_struct *vma, in madvise_dontneed_free() argument
898 struct mm_struct *mm = vma->vm_mm; in madvise_dontneed_free()
900 *prev = vma; in madvise_dontneed_free()
901 if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior)) in madvise_dontneed_free()
907 if (!userfaultfd_remove(vma, start, end)) { in madvise_dontneed_free()
911 vma = vma_lookup(mm, start); in madvise_dontneed_free()
912 if (!vma) in madvise_dontneed_free()
918 if (!madvise_dontneed_free_valid_vma(vma, start, &end, in madvise_dontneed_free()
921 if (end > vma->vm_end) { in madvise_dontneed_free()
934 end = vma->vm_end; in madvise_dontneed_free()
949 return madvise_dontneed_single_vma(vma, start, end); in madvise_dontneed_free()
951 return madvise_free_single_vma(vma, start, end); in madvise_dontneed_free()
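
madvise_dontneed_free() dispatches to the zap path for MADV_DONTNEED and the lazy-free path for MADV_FREE. Note the re-validation dance on lines 907-934: if userfaultfd_remove() had to drop the mmap lock, the VMA is looked up and re-checked before zapping. The userspace contract is simpler:

    #include <string.h>
    #include <sys/mman.h>

    /* Immediately drop the range's pages.  Anonymous memory reads back
     * as zeroes afterwards; file-backed pages are re-read from the file
     * on the next fault.  The mapping itself stays valid. */
    static void reset_scratch(void *buf, size_t len)
    {
        if (madvise(buf, len, MADV_DONTNEED))
            memset(buf, 0, len);    /* fallback: clear by hand */
    }
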
997 static long madvise_remove(struct vm_area_struct *vma, in madvise_remove() argument
1004 struct mm_struct *mm = vma->vm_mm; in madvise_remove()
1008 if (vma->vm_flags & VM_LOCKED) in madvise_remove()
1011 f = vma->vm_file; in madvise_remove()
1017 if (!vma_is_shared_maywrite(vma)) in madvise_remove()
1020 offset = (loff_t)(start - vma->vm_start) in madvise_remove()
1021 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_remove()
1030 if (userfaultfd_remove(vma, start, end)) { in madvise_remove()
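
madvise_remove() punches a hole in the backing object: it refuses VM_LOCKED VMAs (line 1008) and non-shared-writable mappings (line 1017), computes the file offset from vm_pgoff (lines 1020-1021), and, like the DONTNEED path, cooperates with userfaultfd around the operation. The effect is a hole punch through the mapping:

    #include <sys/mman.h>

    /* Free the backing store for a range of a shared, writable file
     * mapping; subsequent reads return zeroes.  Roughly equivalent to
     * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len). */
    static inline int punch_mapped_hole(void *addr, size_t len)
    {
        return madvise(addr, len, MADV_REMOVE);
    }
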
1042 static bool is_valid_guard_vma(struct vm_area_struct *vma, bool allow_locked) in is_valid_guard_vma() argument
1054 if (!vma_is_anonymous(vma)) in is_valid_guard_vma()
1057 if ((vma->vm_flags & (VM_MAYWRITE | disallowed)) != VM_MAYWRITE) in is_valid_guard_vma()
1124 static long madvise_guard_install(struct vm_area_struct *vma, in madvise_guard_install() argument
1131 *prev = vma; in madvise_guard_install()
1132 if (!is_valid_guard_vma(vma, /* allow_locked = */false)) in madvise_guard_install()
1142 err = anon_vma_prepare(vma); in madvise_guard_install()
1162 err = walk_page_range_mm(vma->vm_mm, start, end, in madvise_guard_install()
1178 zap_page_range_single(vma, start, end - start, NULL); in madvise_guard_install()
1221 update_mmu_cache(walk->vma, addr, pte); in guard_remove_pte_entry()
1234 static long madvise_guard_remove(struct vm_area_struct *vma, in madvise_guard_remove() argument
1238 *prev = vma; in madvise_guard_remove()
1243 if (!is_valid_guard_vma(vma, /* allow_locked = */true)) in madvise_guard_remove()
1246 return walk_page_range(vma->vm_mm, start, end, in madvise_guard_remove()
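
is_valid_guard_vma() restricts guard regions to anonymous, VM_MAYWRITE mappings; installation prepares the anon_vma, plants guard markers across the range, and zaps any pages that raced in (line 1178), while removal walks the range again and clears the markers. From userspace (Linux 6.13+; the constants may be absent from older libc headers):

    #include <sys/mman.h>

    #ifndef MADV_GUARD_INSTALL  /* <asm-generic/mman-common.h>, Linux 6.13+ */
    #define MADV_GUARD_INSTALL 102
    #define MADV_GUARD_REMOVE  103
    #endif

    /* Turn the lowest page of a region into a guard: any access raises
     * SIGSEGV, without splitting the VMA or creating a PROT_NONE one. */
    static int install_guard_page(void *region, long page_size)
    {
        return madvise(region, page_size, MADV_GUARD_INSTALL);
    }
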
1255 static int madvise_vma_behavior(struct vm_area_struct *vma, in madvise_vma_behavior() argument
1262 unsigned long new_flags = vma->vm_flags; in madvise_vma_behavior()
1264 if (unlikely(!can_modify_vma_madv(vma, behavior))) in madvise_vma_behavior()
1269 return madvise_remove(vma, prev, start, end); in madvise_vma_behavior()
1271 return madvise_willneed(vma, prev, start, end); in madvise_vma_behavior()
1273 return madvise_cold(vma, prev, start, end); in madvise_vma_behavior()
1275 return madvise_pageout(vma, prev, start, end); in madvise_vma_behavior()
1279 return madvise_dontneed_free(vma, prev, start, end, behavior); in madvise_vma_behavior()
1293 if (vma->vm_flags & VM_IO) in madvise_vma_behavior()
1299 if (vma->vm_file || vma->vm_flags & VM_SHARED) in madvise_vma_behavior()
1304 if (vma->vm_flags & VM_DROPPABLE) in madvise_vma_behavior()
1312 if ((!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) || in madvise_vma_behavior()
1313 (vma->vm_flags & VM_DROPPABLE)) in madvise_vma_behavior()
1319 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_vma_behavior()
1325 error = hugepage_madvise(vma, &new_flags, behavior); in madvise_vma_behavior()
1330 return madvise_collapse(vma, prev, start, end); in madvise_vma_behavior()
1332 return madvise_guard_install(vma, prev, start, end); in madvise_vma_behavior()
1334 return madvise_guard_remove(vma, prev, start, end); in madvise_vma_behavior()
1337 anon_name = anon_vma_name(vma); in madvise_vma_behavior()
1339 error = madvise_update_vma(vma, prev, start, end, new_flags, in madvise_vma_behavior()
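
madvise_vma_behavior() is the per-VMA dispatcher: destructive behaviors (lines 1269-1279) return through their helpers above, flag-changing advice computes new_flags and funnels into madvise_update_vma() (line 1339), and incompatible combinations fail with -EINVAL. Since unknown advice values also yield EINVAL, a kernel can be probed at runtime; a hedged sketch (EINVAL can equally mean "incompatible with this VMA", so the scratch mapping is plain anonymous memory, which most advice accepts):

    #include <errno.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Returns 1 if the kernel appears to recognize the advice value,
     * 0 if it looks unsupported (EINVAL), -1 on setup failure. */
    static int advice_supported(int advice)
    {
        long page = sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        int ret;

        if (p == MAP_FAILED)
            return -1;
        ret = (madvise(p, page, advice) == 0 || errno != EINVAL);
        munmap(p, page);
        return ret;
    }
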
1472 int (*visit)(struct vm_area_struct *vma, in madvise_walk_vmas() argument
1476 struct vm_area_struct *vma; in madvise_walk_vmas() local
1486 vma = find_vma_prev(mm, start, &prev); in madvise_walk_vmas()
1487 if (vma && start > vma->vm_start) in madvise_walk_vmas()
1488 prev = vma; in madvise_walk_vmas()
1494 if (!vma) in madvise_walk_vmas()
1498 if (start < vma->vm_start) { in madvise_walk_vmas()
1500 start = vma->vm_start; in madvise_walk_vmas()
1506 tmp = vma->vm_end; in madvise_walk_vmas()
1511 error = visit(vma, &prev, start, tmp, arg); in madvise_walk_vmas()
1520 vma = find_vma(mm, prev->vm_end); in madvise_walk_vmas()
1522 vma = find_vma(mm, start); in madvise_walk_vmas()
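
madvise_walk_vmas() implements the visitor pattern shared by madvise() and the anon-name prctl: find the first VMA covering or following start, clamp [start, end) to each VMA in turn, call visit() on every piece, and re-find the next VMA afterwards, since the callback may have merged or split VMAs. A self-contained sketch of that clamp-and-visit loop, with a hypothetical struct region list standing in for the VMA tree:

    struct region {
        unsigned long start, end;
        struct region *next;
    };

    static int walk_regions(struct region *r, unsigned long start,
                            unsigned long end,
                            int (*visit)(struct region *, unsigned long,
                                         unsigned long))
    {
        while (r && start < end) {
            unsigned long tmp;
            int err;

            if (r->end <= start) {      /* region entirely before range */
                r = r->next;
                continue;
            }
            if (start < r->start)       /* unmapped hole: skip ahead */
                start = r->start;
            if (start >= end)
                break;

            tmp = r->end < end ? r->end : end;  /* clamp to this region */
            err = visit(r, start, tmp);
            if (err)
                return err;

            start = tmp;
            r = r->next;
        }
        return 0;
    }
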
1529 static int madvise_vma_anon_name(struct vm_area_struct *vma, in madvise_vma_anon_name() argument
1537 if (vma->vm_file && !vma_is_anon_shmem(vma)) in madvise_vma_anon_name()
1540 error = madvise_update_vma(vma, prev, start, end, vma->vm_flags, in madvise_vma_anon_name()