Lines matching refs: vma
(Each entry gives the line number in the source file, evidently the kernel's mm/memory.c, followed by the matching source line and the enclosing function; a trailing "argument", "local" or "member" notes how vma appears at that site.)

115 	if (!userfaultfd_wp(vmf->vma))  in vmf_orig_pte_uffd_wp()
363 struct vm_area_struct *vma, unsigned long floor, in free_pgtables() argument
369 unsigned long addr = vma->vm_start; in free_pgtables()
385 vma_start_write(vma); in free_pgtables()
386 unlink_anon_vmas(vma); in free_pgtables()
388 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
389 unlink_file_vma(vma); in free_pgtables()
390 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
394 unlink_file_vma_batch_add(&vb, vma); in free_pgtables()
399 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
401 vma = next; in free_pgtables()
406 vma_start_write(vma); in free_pgtables()
407 unlink_anon_vmas(vma); in free_pgtables()
408 unlink_file_vma_batch_add(&vb, vma); in free_pgtables()
411 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
414 vma = next; in free_pgtables()
415 } while (vma); in free_pgtables()
495 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
498 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
527 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
528 index = linear_page_index(vma, addr); in print_bad_pte()
536 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
538 vma->vm_file, in print_bad_pte()
539 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
540 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_pte()
591 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page() argument
599 if (vma->vm_ops && vma->vm_ops->find_special_page) in vm_normal_page()
600 return vma->vm_ops->find_special_page(vma, addr); in vm_normal_page()
601 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vm_normal_page()
616 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
622 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page()
623 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page()
631 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page()
632 if (pfn == vma->vm_pgoff + off) in vm_normal_page()
634 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page()
644 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
657 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, in vm_normal_folio() argument
660 struct page *page = vm_normal_page(vma, addr, pte); in vm_normal_folio()
668 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page_pmd() argument
677 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page_pmd()
678 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page_pmd()
684 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page_pmd()
685 if (pfn == vma->vm_pgoff + off) in vm_normal_page_pmd()
687 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page_pmd()
707 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, in vm_normal_folio_pmd() argument
710 struct page *page = vm_normal_page_pmd(vma, addr, pmd); in vm_normal_folio_pmd()
718 static void restore_exclusive_pte(struct vm_area_struct *vma, in restore_exclusive_pte() argument
728 pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); in restore_exclusive_pte()
736 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in restore_exclusive_pte()
746 folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE); in restore_exclusive_pte()
754 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
760 update_mmu_cache(vma, address, ptep); in restore_exclusive_pte()
768 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, in try_restore_exclusive_pte() argument
775 restore_exclusive_pte(vma, page, addr, src_pte); in try_restore_exclusive_pte()
1055 struct vm_area_struct *vma, unsigned long addr, bool need_zero) in folio_prealloc() argument
1060 new_folio = vma_alloc_zeroed_movable_folio(vma, addr); in folio_prealloc()
1062 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr); in folio_prealloc()
1470 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, in zap_install_uffd_wp_if_needed() argument
1478 if (vma_is_anonymous(vma)) in zap_install_uffd_wp_if_needed()
1486 if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval)) in zap_install_uffd_wp_if_needed()
1498 struct vm_area_struct *vma, struct folio *folio, in zap_present_folio_ptes() argument
1515 if (pte_young(ptent) && likely(vma_has_recency(vma))) in zap_present_folio_ptes()
1524 arch_check_zapped_pte(vma, ptent); in zap_present_folio_ptes()
1526 if (unlikely(userfaultfd_pte_wp(vma, ptent))) in zap_present_folio_ptes()
1527 *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, in zap_present_folio_ptes()
1531 folio_remove_rmap_ptes(folio, page, nr, vma); in zap_present_folio_ptes()
1534 print_bad_pte(vma, addr, ptent, page); in zap_present_folio_ptes()
1549 struct vm_area_struct *vma, pte_t *pte, pte_t ptent, in zap_present_ptes() argument
1560 page = vm_normal_page(vma, addr, ptent); in zap_present_ptes()
1564 arch_check_zapped_pte(vma, ptent); in zap_present_ptes()
1566 if (userfaultfd_pte_wp(vma, ptent)) in zap_present_ptes()
1567 *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, in zap_present_ptes()
1587 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr, in zap_present_ptes()
1592 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr, in zap_present_ptes()
1598 struct vm_area_struct *vma, pte_t *pte, pte_t ptent, in zap_nonpresent_ptes() argument
1620 WARN_ON_ONCE(!vma_is_anonymous(vma)); in zap_nonpresent_ptes()
1623 folio_remove_rmap_pte(folio, page, vma); in zap_nonpresent_ptes()
1644 if (!vma_is_anonymous(vma) && !zap_drop_markers(details)) in zap_nonpresent_ptes()
1662 clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm); in zap_nonpresent_ptes()
1663 *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); in zap_nonpresent_ptes()
1669 struct vm_area_struct *vma, pte_t *pte, in do_zap_pte_range() argument
1694 nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr, in do_zap_pte_range()
1698 nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr, in do_zap_pte_range()
1705 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1738 nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss, in zap_pte_range()
1766 tlb_flush_rmaps(tlb, vma); in zap_pte_range()
1797 struct vm_area_struct *vma, pud_t *pud, in zap_pmd_range() argument
1809 __split_huge_pmd(vma, pmd, addr, false, NULL); in zap_pmd_range()
1810 else if (zap_huge_pmd(tlb, vma, pmd, addr)) { in zap_pmd_range()
1830 addr = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1839 struct vm_area_struct *vma, p4d_t *p4d, in zap_pud_range() argument
1852 split_huge_pud(vma, pud, addr); in zap_pud_range()
1853 } else if (zap_huge_pud(tlb, vma, pud, addr)) in zap_pud_range()
1859 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1868 struct vm_area_struct *vma, pgd_t *pgd, in zap_p4d_range() argument
1880 next = zap_pud_range(tlb, vma, p4d, addr, next, details); in zap_p4d_range()
1887 struct vm_area_struct *vma, in unmap_page_range() argument
1895 tlb_start_vma(tlb, vma); in unmap_page_range()
1896 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1901 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1903 tlb_end_vma(tlb, vma); in unmap_page_range()
1908 struct vm_area_struct *vma, unsigned long start_addr, in unmap_single_vma() argument
1912 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
1915 if (start >= vma->vm_end) in unmap_single_vma()
1917 end = min(vma->vm_end, end_addr); in unmap_single_vma()
1918 if (end <= vma->vm_start) in unmap_single_vma()
1921 if (vma->vm_file) in unmap_single_vma()
1922 uprobe_munmap(vma, start, end); in unmap_single_vma()
1924 if (unlikely(vma->vm_flags & VM_PFNMAP)) in unmap_single_vma()
1925 untrack_pfn(vma, 0, 0, mm_wr_locked); in unmap_single_vma()
1928 if (unlikely(is_vm_hugetlb_page(vma))) { in unmap_single_vma()
1940 if (vma->vm_file) { in unmap_single_vma()
1943 __unmap_hugepage_range(tlb, vma, start, end, in unmap_single_vma()
1947 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1973 struct vm_area_struct *vma, unsigned long start_addr, in unmap_vmas() argument
1984 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, in unmap_vmas()
1990 hugetlb_zap_begin(vma, &start, &end); in unmap_vmas()
1991 unmap_single_vma(tlb, vma, start, end, &details, in unmap_vmas()
1993 hugetlb_zap_end(vma, &details); in unmap_vmas()
1994 vma = mas_find(mas, tree_end - 1); in unmap_vmas()
1995 } while (vma && likely(!xa_is_zero(vma))); in unmap_vmas()
2008 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
2015 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in zap_page_range_single()
2017 hugetlb_zap_begin(vma, &range.start, &range.end); in zap_page_range_single()
2018 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range_single()
2019 update_hiwater_rss(vma->vm_mm); in zap_page_range_single()
2025 unmap_single_vma(&tlb, vma, address, end, details, false); in zap_page_range_single()
2028 hugetlb_zap_end(vma, details); in zap_page_range_single()
2042 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
2045 if (!range_in_vma(vma, address, address + size) || in zap_vma_ptes()
2046 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
2049 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
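
The zap_vma_ptes() entries above show the exported helper rejecting ranges outside the VMA, or VMAs without VM_PFNMAP, before handing off to zap_page_range_single(). A minimal sketch of the usual caller, a driver revoking a PFN mapping it installed earlier; the mydev_* name is hypothetical and not taken from this file:

#include <linux/mm.h>

/*
 * Illustrative revoke path for a driver that earlier populated a
 * VM_PFNMAP user mapping (e.g. via remap_pfn_range() or vmf_insert_pfn()).
 */
static void mydev_revoke_user_mapping(struct vm_area_struct *vma)
{
        /* Drop every PTE in the VMA; later accesses fault back into the
         * driver's .fault handler (or get SIGBUS if it has none). */
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
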
2085 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma) in vm_mixed_zeropage_allowed() argument
2087 VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP); in vm_mixed_zeropage_allowed()
2094 if (mm_forbids_zeropage(vma->vm_mm)) in vm_mixed_zeropage_allowed()
2097 if (is_cow_mapping(vma->vm_flags)) in vm_mixed_zeropage_allowed()
2100 if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) in vm_mixed_zeropage_allowed()
2111 return vma->vm_ops && vma->vm_ops->pfn_mkwrite && in vm_mixed_zeropage_allowed()
2112 (vma_is_fsdax(vma) || vma->vm_flags & VM_IO); in vm_mixed_zeropage_allowed()
2115 static int validate_page_before_insert(struct vm_area_struct *vma, in validate_page_before_insert() argument
2123 if (!vm_mixed_zeropage_allowed(vma)) in validate_page_before_insert()
2134 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, in insert_page_into_pte_locked() argument
2148 inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); in insert_page_into_pte_locked()
2149 folio_add_file_rmap_pte(folio, page, vma); in insert_page_into_pte_locked()
2151 set_pte_at(vma->vm_mm, addr, pte, pteval); in insert_page_into_pte_locked()
2155 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
2162 retval = validate_page_before_insert(vma, page); in insert_page()
2166 pte = get_locked_pte(vma->vm_mm, addr, &ptl); in insert_page()
2169 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); in insert_page()
2175 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, in insert_page_in_batch_locked() argument
2180 err = validate_page_before_insert(vma, page); in insert_page_in_batch_locked()
2183 return insert_page_into_pte_locked(vma, pte, addr, page, prot); in insert_page_in_batch_locked()
2189 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, in insert_pages() argument
2195 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
2224 int err = insert_page_in_batch_locked(vma, pte, in insert_pages()
2262 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pages() argument
2267 if (addr < vma->vm_start || end_addr >= vma->vm_end) in vm_insert_pages()
2269 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_pages()
2270 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
2271 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_pages()
2272 vm_flags_set(vma, VM_MIXEDMAP); in vm_insert_pages()
2275 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); in vm_insert_pages()
2309 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
2312 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
2314 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
2315 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
2316 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
2317 vm_flags_set(vma, VM_MIXEDMAP); in vm_insert_page()
2319 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
2336 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, in __vm_map_pages() argument
2339 unsigned long count = vma_pages(vma); in __vm_map_pages()
2340 unsigned long uaddr = vma->vm_start; in __vm_map_pages()
2352 ret = vm_insert_page(vma, uaddr, pages[offset + i]); in __vm_map_pages()
2379 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, in vm_map_pages() argument
2382 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); in vm_map_pages()
2399 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, in vm_map_pages_zero() argument
2402 return __vm_map_pages(vma, pages, num, 0); in vm_map_pages_zero()
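
vm_insert_page() and the vm_map_pages*() wrappers above are the driver-facing way to expose kernel-allocated pages through a shared user mapping; both bounds-check against the VMA and rely on VM_MIXEDMAP. A minimal sketch of an mmap handler built on vm_map_pages(); struct mydev_buf and its fields are assumptions for illustration only:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-device state: "pages" were allocated by the driver
 * (e.g. one alloc_page(GFP_KERNEL) per entry) and "npages" counts them. */
struct mydev_buf {
        struct page **pages;
        unsigned long npages;
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mydev_buf *buf = file->private_data;

        /* vm_map_pages() validates vma->vm_pgoff and the VMA size against
         * npages, then calls vm_insert_page() for each page; the first
         * insert sets VM_MIXEDMAP on the VMA. */
        return vm_map_pages(vma, buf->pages, buf->npages);
}
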
2406 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
2409 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2434 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
2435 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) in insert_pfn()
2436 update_mmu_cache(vma, addr, pte); in insert_pfn()
2449 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
2453 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
2493 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_prot() argument
2502 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_prot()
2503 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_prot()
2505 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_prot()
2506 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vmf_insert_pfn_prot()
2508 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_prot()
2514 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); in vmf_insert_pfn_prot()
2516 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, in vmf_insert_pfn_prot()
2541 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn() argument
2544 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); in vmf_insert_pfn()
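
vmf_insert_pfn() and vmf_insert_pfn_prot() above are meant to be called from a .fault handler on a VM_PFNMAP VMA and already return a vm_fault_t. A hedged sketch of such a handler; the linear vm_pgoff-based PFN lookup is an assumption, chosen because it matches the convention vm_normal_page() recognises for CoW PFN mappings (pfn == vm_pgoff + off):

#include <linux/mm.h>

static unsigned long mydev_addr_to_pfn(struct vm_area_struct *vma,
                                       unsigned long addr)
{
        /* Linear mapping: user offset within the VMA maps straight onto
         * consecutive PFNs starting at vma->vm_pgoff. */
        return vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
}

static vm_fault_t mydev_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;

        /* vmf_insert_pfn() returns a vm_fault_t, so the result can be
         * handed straight back to the fault core. */
        return vmf_insert_pfn(vma, vmf->address,
                              mydev_addr_to_pfn(vma, vmf->address));
}

static const struct vm_operations_struct mydev_vm_ops = {
        .fault = mydev_vm_fault,
};
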
2548 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite) in vm_mixed_ok() argument
2551 (mkwrite || !vm_mixed_zeropage_allowed(vma))) in vm_mixed_ok()
2554 if (vma->vm_flags & VM_MIXEDMAP) in vm_mixed_ok()
2565 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, in __vm_insert_mixed() argument
2568 pgprot_t pgprot = vma->vm_page_prot; in __vm_insert_mixed()
2571 if (!vm_mixed_ok(vma, pfn, mkwrite)) in __vm_insert_mixed()
2574 if (addr < vma->vm_start || addr >= vma->vm_end) in __vm_insert_mixed()
2577 track_pfn_insert(vma, &pgprot, pfn); in __vm_insert_mixed()
2599 err = insert_page(vma, addr, page, pgprot); in __vm_insert_mixed()
2601 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); in __vm_insert_mixed()
2612 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed() argument
2615 return __vm_insert_mixed(vma, addr, pfn, false); in vmf_insert_mixed()
2624 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, in vmf_insert_mixed_mkwrite() argument
2627 return __vm_insert_mixed(vma, addr, pfn, true); in vmf_insert_mixed_mkwrite()
2728 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_internal() argument
2734 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range_internal()
2758 if (is_cow_mapping(vma->vm_flags)) { in remap_pfn_range_internal()
2759 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range_internal()
2761 vma->vm_pgoff = pfn; in remap_pfn_range_internal()
2764 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); in remap_pfn_range_internal()
2769 flush_cache_range(vma, addr, end); in remap_pfn_range_internal()
2785 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_notrack() argument
2788 int error = remap_pfn_range_internal(vma, addr, pfn, size, prot); in remap_pfn_range_notrack()
2798 zap_page_range_single(vma, addr, size, NULL); in remap_pfn_range_notrack()
2814 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
2819 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
2823 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); in remap_pfn_range()
2825 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true); in remap_pfn_range()
2845 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
2864 if (vma->vm_pgoff > pages) in vm_iomap_memory()
2866 pfn += vma->vm_pgoff; in vm_iomap_memory()
2867 pages -= vma->vm_pgoff; in vm_iomap_memory()
2870 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
2875 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
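
remap_pfn_range() maps a physically contiguous range into userspace at mmap time, and vm_iomap_memory() above is the wrapper that derives the PFN and length checks from a physical start/len plus vma->vm_pgoff. A minimal .mmap sketch; the register base, the size, and the decision to ignore vm_pgoff are assumptions for illustration:

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative values; a real driver would take these from its resource
 * (pci_resource_start() and friends). */
#define MYDEV_REG_BASE  0xfd000000UL
#define MYDEV_REG_SIZE  0x10000UL

static int mydev_mmap_regs(struct file *file, struct vm_area_struct *vma)
{
        unsigned long len = vma->vm_end - vma->vm_start;

        if (len > MYDEV_REG_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* vm_iomap_memory(vma, MYDEV_REG_BASE, MYDEV_REG_SIZE) would do
         * the same job with the pfn/offset bookkeeping handled for us. */
        return remap_pfn_range(vma, vma->vm_start,
                               MYDEV_REG_BASE >> PAGE_SHIFT,
                               len, vma->vm_page_prot);
}
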
3130 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user() local
3131 struct mm_struct *mm = vma->vm_mm; in __wp_page_copy_user()
3135 if (copy_mc_user_highpage(dst, src, addr, vma)) in __wp_page_copy_user()
3165 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
3171 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in __wp_page_copy_user()
3172 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); in __wp_page_copy_user()
3190 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
3222 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) in __get_fault_gfp_mask() argument
3224 struct file *vm_file = vma->vm_file; in __get_fault_gfp_mask()
3249 if (vmf->vma->vm_file && in do_page_mkwrite()
3250 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
3253 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
3277 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page() local
3281 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; in fault_dirty_shared_page()
3295 file_update_time(vma->vm_file); in fault_dirty_shared_page()
3331 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse() local
3348 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3350 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_reuse()
3351 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3352 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_reuse()
3364 struct vm_area_struct *vma = vmf->vma; in vmf_can_call_fault() local
3366 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) in vmf_can_call_fault()
3368 vma_end_read(vma); in vmf_can_call_fault()
3389 struct vm_area_struct *vma = vmf->vma; in __vmf_anon_prepare() local
3392 if (likely(vma->anon_vma)) in __vmf_anon_prepare()
3395 if (!mmap_read_trylock(vma->vm_mm)) in __vmf_anon_prepare()
3398 if (__anon_vma_prepare(vma)) in __vmf_anon_prepare()
3401 mmap_read_unlock(vma->vm_mm); in __vmf_anon_prepare()
3425 struct vm_area_struct *vma = vmf->vma; in wp_page_copy() local
3426 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3444 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); in wp_page_copy()
3491 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3492 entry = mk_pte(&new_folio->page, vma->vm_page_prot); in wp_page_copy()
3500 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_copy()
3510 ptep_clear_flush(vma, vmf->address, vmf->pte); in wp_page_copy()
3511 folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); in wp_page_copy()
3512 folio_add_lru_vma(new_folio, vma); in wp_page_copy()
3515 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_copy()
3539 folio_remove_rmap_pte(old_folio, vmf->page, vma); in wp_page_copy()
3547 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3592 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
3593 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3602 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3616 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared() local
3618 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
3627 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3639 struct vm_area_struct *vma = vmf->vma; in wp_page_shared() local
3644 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
3677 struct vm_area_struct *vma) in wp_can_reuse_anon_folio() argument
3719 folio_move_anon_rmap(folio, vma); in wp_can_reuse_anon_folio()
3750 struct vm_area_struct *vma = vmf->vma; in do_wp_page() local
3755 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { in do_wp_page()
3756 if (!userfaultfd_wp_async(vma)) { in do_wp_page()
3768 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_wp_page()
3780 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3781 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3782 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3785 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
3794 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in do_wp_page()
3815 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { in do_wp_page()
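
The do_wp_page()/wp_page_copy() entries above are the write-protect fault path behind copy-on-write. From userspace the path is exercised by writing to a private page that is still shared, for instance anonymous memory shared with a child after fork(); a small demonstration (plain userspace C, not kernel code):

/* The parent and child initially share the anonymous page; the child's
 * write takes a write-protect fault (do_wp_page -> wp_page_copy) and the
 * child gets its own copy, so the parent's data is unchanged. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        strcpy(p, "parent");

        pid_t pid = fork();
        if (pid == 0) {
                strcpy(p, "child");     /* triggers the CoW copy */
                _exit(0);
        }
        waitpid(pid, NULL, 0);
        printf("parent still sees: %s\n", p);   /* prints "parent" */
        return 0;
}
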
3839 static void unmap_mapping_range_vma(struct vm_area_struct *vma, in unmap_mapping_range_vma() argument
3843 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
3851 struct vm_area_struct *vma; in unmap_mapping_range_tree() local
3854 vma_interval_tree_foreach(vma, root, first_index, last_index) { in unmap_mapping_range_tree()
3855 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
3856 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
3860 unmap_mapping_range_vma(vma, in unmap_mapping_range_tree()
3861 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3862 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3973 struct vm_area_struct *vma = vmf->vma; in remove_device_exclusive_entry() local
3994 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
3998 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
4001 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); in remove_device_exclusive_entry()
4013 struct vm_area_struct *vma, in should_try_to_free_swap() argument
4018 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || in should_try_to_free_swap()
4033 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
4046 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
4053 if (vma_is_anonymous(vmf->vma)) in do_pte_missing()
4069 if (unlikely(!userfaultfd_wp(vmf->vma))) in pte_marker_handle_uffd_wp()
4104 struct vm_area_struct *vma = vmf->vma; in __alloc_swap_folio() local
4108 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address); in __alloc_swap_folio()
4113 if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, in __alloc_swap_folio()
4202 struct vm_area_struct *vma = vmf->vma; in alloc_swap_folio() local
4216 if (unlikely(userfaultfd_armed(vma))) in alloc_swap_folio()
4232 orders = thp_vma_allowable_orders(vma, vma->vm_flags, in alloc_swap_folio()
4234 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_swap_folio()
4241 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in alloc_swap_folio()
4261 gfp = vma_thp_gfp_mask(vma); in alloc_swap_folio()
4264 folio = vma_alloc_folio(gfp, order, vma, addr); in alloc_swap_folio()
4266 if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, in alloc_swap_folio()
4298 struct vm_area_struct *vma = vmf->vma; in do_swap_page() local
4321 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
4332 vma_end_read(vma); in do_swap_page()
4338 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4358 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
4369 folio = swap_cache_get_folio(entry, vma, vmf->address); in do_swap_page()
4430 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4441 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
4473 folio = ksm_might_need_to_copy(folio, vma, vmf->address); in do_swap_page()
4502 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
4542 if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start))) in do_swap_page()
4544 if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end))) in do_swap_page()
4622 if (should_try_to_free_swap(folio, vma, vmf->flags)) in do_swap_page()
4625 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); in do_swap_page()
4626 add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages); in do_swap_page()
4627 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
4641 if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) && in do_swap_page()
4642 !pte_needs_soft_dirty_wp(vma, pte)) { in do_swap_page()
4643 pte = pte_mkwrite(pte, vma); in do_swap_page()
4652 flush_icache_pages(vma, page, nr_pages); in do_swap_page()
4657 folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); in do_swap_page()
4658 folio_add_lru_vma(folio, vma); in do_swap_page()
4668 folio_add_new_anon_rmap(folio, vma, address, rmap_flags); in do_swap_page()
4670 folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, in do_swap_page()
4676 set_ptes(vma->vm_mm, address, ptep, pte, nr_pages); in do_swap_page()
4677 arch_do_swap_page_nr(vma->vm_mm, vma, address, in do_swap_page()
4702 update_mmu_cache_range(vmf, vma, address, ptep, nr_pages); in do_swap_page()
4751 struct vm_area_struct *vma = vmf->vma; in alloc_anon_folio() local
4764 if (unlikely(userfaultfd_armed(vma))) in alloc_anon_folio()
4772 orders = thp_vma_allowable_orders(vma, vma->vm_flags, in alloc_anon_folio()
4774 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_anon_folio()
4802 gfp = vma_thp_gfp_mask(vma); in alloc_anon_folio()
4805 folio = vma_alloc_folio(gfp, order, vma, addr); in alloc_anon_folio()
4807 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { in alloc_anon_folio()
4831 return folio_prealloc(vma->vm_mm, vma, vmf->address, true); in alloc_anon_folio()
4841 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page() local
4849 if (vma->vm_flags & VM_SHARED) in do_anonymous_page()
4856 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
4861 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
4863 vma->vm_page_prot)); in do_anonymous_page()
4864 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
4869 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
4872 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4876 if (userfaultfd_missing(vma)) { in do_anonymous_page()
4904 entry = mk_pte(&folio->page, vma->vm_page_prot); in do_anonymous_page()
4906 if (vma->vm_flags & VM_WRITE) in do_anonymous_page()
4907 entry = pte_mkwrite(pte_mkdirty(entry), vma); in do_anonymous_page()
4909 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in do_anonymous_page()
4913 update_mmu_tlb(vma, addr, vmf->pte); in do_anonymous_page()
4916 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
4920 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4925 if (userfaultfd_missing(vma)) { in do_anonymous_page()
4932 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); in do_anonymous_page()
4934 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); in do_anonymous_page()
4935 folio_add_lru_vma(folio, vma); in do_anonymous_page()
4939 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); in do_anonymous_page()
4942 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
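
do_anonymous_page() above is the first-touch path for private anonymous memory: a read fault maps the shared zero page read-only, and the first write allocates a zeroed folio and maps it writable (the pte_mkwrite branch). A short userspace probe of that behaviour; the 16 MiB size is arbitrary:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 16UL << 20;        /* untouched until the lines below */
        unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* Read fault: served from the shared zero page. */
        printf("first read: %d\n", p[0]);
        /* Write fault: a private zeroed page is allocated and mapped. */
        p[0] = 42;
        printf("after write: %d\n", p[0]);
        return 0;
}
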
4961 struct vm_area_struct *vma = vmf->vma; in __do_fault() local
4981 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4986 ret = vma->vm_ops->fault(vmf); in __do_fault()
5018 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte() local
5020 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
5025 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
5032 struct vm_area_struct *vma = vmf->vma; in do_set_pmd() local
5044 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags)) in do_set_pmd()
5047 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) in do_set_pmd()
5068 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
5073 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
5077 flush_icache_pages(vma, page, HPAGE_PMD_NR); in do_set_pmd()
5079 entry = mk_huge_pmd(page, vma->vm_page_prot); in do_set_pmd()
5081 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_set_pmd()
5083 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR); in do_set_pmd()
5084 folio_add_file_rmap_pmd(folio, page, vma); in do_set_pmd()
5092 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
5094 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
5121 struct vm_area_struct *vma = vmf->vma; in set_pte_range() local
5126 flush_icache_pages(vma, page, nr); in set_pte_range()
5127 entry = mk_pte(page, vma->vm_page_prot); in set_pte_range()
5135 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in set_pte_range()
5139 if (write && !(vma->vm_flags & VM_SHARED)) { in set_pte_range()
5141 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); in set_pte_range()
5142 folio_add_lru_vma(folio, vma); in set_pte_range()
5144 folio_add_file_rmap_ptes(folio, page, nr, vma); in set_pte_range()
5146 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); in set_pte_range()
5149 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); in set_pte_range()
5177 struct vm_area_struct *vma = vmf->vma; in finish_fault() local
5182 !(vma->vm_flags & VM_SHARED); in finish_fault()
5200 if (!(vma->vm_flags & VM_SHARED)) { in finish_fault()
5201 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
5214 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
5215 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
5227 if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma)) || in finish_fault()
5233 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in finish_fault()
5242 vma_off + (nr_pages - idx) > vma_pages(vma) || in finish_fault()
5253 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
5260 update_mmu_tlb(vma, addr, vmf->pte); in finish_fault()
5272 add_mm_counter(vma->vm_mm, type, nr_pages); in finish_fault()
5345 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in do_fault_around()
5355 pte_off + vma_pages(vmf->vma) - vma_off) - 1; in do_fault_around()
5358 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
5364 ret = vmf->vma->vm_ops->map_pages(vmf, in do_fault_around()
5376 if (!vmf->vma->vm_ops->map_pages) in should_fault_around()
5379 if (uffd_disable_fault_around(vmf->vma)) in should_fault_around()
5420 struct vm_area_struct *vma = vmf->vma; in do_cow_fault() local
5430 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); in do_cow_fault()
5442 if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) { in do_cow_fault()
5462 struct vm_area_struct *vma = vmf->vma; in do_shared_fault() local
5480 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
5512 struct vm_area_struct *vma = vmf->vma; in do_fault() local
5513 struct mm_struct *vm_mm = vma->vm_mm; in do_fault()
5519 if (!vma->vm_ops->fault) { in do_fault()
5520 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
5541 else if (!(vma->vm_flags & VM_SHARED)) in do_fault()
5558 struct vm_area_struct *vma = vmf->vma; in numa_migrate_check() local
5575 if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) in numa_migrate_check()
5587 vma_set_access_pid_bit(vma); in numa_migrate_check()
5601 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, in numa_rebuild_single_mapping() argument
5607 old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte); in numa_rebuild_single_mapping()
5608 pte = pte_modify(old_pte, vma->vm_page_prot); in numa_rebuild_single_mapping()
5611 pte = pte_mkwrite(pte, vma); in numa_rebuild_single_mapping()
5612 ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte); in numa_rebuild_single_mapping()
5613 update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); in numa_rebuild_single_mapping()
5616 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, in numa_rebuild_large_mapping() argument
5627 start = max3(addr_start, pt_start, vma->vm_start); in numa_rebuild_large_mapping()
5629 vma->vm_end); in numa_rebuild_large_mapping()
5644 ptent = pte_modify(ptent, vma->vm_page_prot); in numa_rebuild_large_mapping()
5647 can_change_pte_writable(vma, addr, ptent)) in numa_rebuild_large_mapping()
5651 numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); in numa_rebuild_large_mapping()
5657 struct vm_area_struct *vma = vmf->vma; in do_numa_page() local
5661 bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma); in do_numa_page()
5680 pte = pte_modify(old_pte, vma->vm_page_prot); in do_numa_page()
5688 can_change_pte_writable(vma, vmf->address, pte)) in do_numa_page()
5691 folio = vm_normal_folio(vma, vmf->address, pte); in do_numa_page()
5702 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { in do_numa_page()
5720 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
5734 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, in do_numa_page()
5737 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, in do_numa_page()
5748 struct vm_area_struct *vma = vmf->vma; in create_huge_pmd() local
5749 if (vma_is_anonymous(vma)) in create_huge_pmd()
5751 if (vma->vm_ops->huge_fault) in create_huge_pmd()
5752 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); in create_huge_pmd()
5759 struct vm_area_struct *vma = vmf->vma; in wp_huge_pmd() local
5763 if (vma_is_anonymous(vma)) { in wp_huge_pmd()
5765 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { in wp_huge_pmd()
5766 if (userfaultfd_wp_async(vmf->vma)) in wp_huge_pmd()
5773 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in wp_huge_pmd()
5774 if (vma->vm_ops->huge_fault) { in wp_huge_pmd()
5775 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); in wp_huge_pmd()
5783 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
5792 struct vm_area_struct *vma = vmf->vma; in create_huge_pud() local
5794 if (vma_is_anonymous(vma)) in create_huge_pud()
5796 if (vma->vm_ops->huge_fault) in create_huge_pud()
5797 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); in create_huge_pud()
5806 struct vm_area_struct *vma = vmf->vma; in wp_huge_pud() local
5810 if (vma_is_anonymous(vma)) in wp_huge_pud()
5812 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in wp_huge_pud()
5813 if (vma->vm_ops->huge_fault) { in wp_huge_pud()
5814 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); in wp_huge_pud()
5821 __split_huge_pud(vma, vmf->pud, vmf->address); in wp_huge_pud()
5870 vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
5890 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
5896 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
5906 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
5908 update_mmu_cache_range(vmf, vmf->vma, vmf->address, in handle_pte_fault()
5921 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, in handle_pte_fault()
5935 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, in __handle_mm_fault() argument
5939 .vma = vma, in __handle_mm_fault()
5943 .pgoff = linear_page_index(vma, address), in __handle_mm_fault()
5944 .gfp_mask = __get_fault_gfp_mask(vma), in __handle_mm_fault()
5946 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
5947 unsigned long vm_flags = vma->vm_flags; in __handle_mm_fault()
5962 thp_vma_allowable_order(vma, vm_flags, in __handle_mm_fault()
5997 thp_vma_allowable_order(vma, vm_flags, in __handle_mm_fault()
6013 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
6100 static void lru_gen_enter_fault(struct vm_area_struct *vma) in lru_gen_enter_fault() argument
6103 current->in_lru_fault = vma_has_recency(vma); in lru_gen_enter_fault()
6111 static void lru_gen_enter_fault(struct vm_area_struct *vma) in lru_gen_enter_fault() argument
6120 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, in sanitize_fault_flags() argument
6130 if (!is_cow_mapping(vma->vm_flags)) in sanitize_fault_flags()
6134 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE))) in sanitize_fault_flags()
6137 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) && in sanitize_fault_flags()
6138 !is_cow_mapping(vma->vm_flags))) in sanitize_fault_flags()
6162 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, in handle_mm_fault() argument
6166 struct mm_struct *mm = vma->vm_mm; in handle_mm_fault()
6172 ret = sanitize_fault_flags(vma, &flags); in handle_mm_fault()
6176 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, in handle_mm_fault()
6183 is_droppable = !!(vma->vm_flags & VM_DROPPABLE); in handle_mm_fault()
6192 lru_gen_enter_fault(vma); in handle_mm_fault()
6194 if (unlikely(is_vm_hugetlb_page(vma))) in handle_mm_fault()
6195 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
6197 ret = __handle_mm_fault(vma, address, flags); in handle_mm_fault()
6292 struct vm_area_struct *vma; in lock_mm_and_find_vma() local
6297 vma = find_vma(mm, addr); in lock_mm_and_find_vma()
6298 if (likely(vma && (vma->vm_start <= addr))) in lock_mm_and_find_vma()
6299 return vma; in lock_mm_and_find_vma()
6305 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) { in lock_mm_and_find_vma()
6323 vma = find_vma(mm, addr); in lock_mm_and_find_vma()
6324 if (!vma) in lock_mm_and_find_vma()
6326 if (vma->vm_start <= addr) in lock_mm_and_find_vma()
6328 if (!(vma->vm_flags & VM_GROWSDOWN)) in lock_mm_and_find_vma()
6332 if (expand_stack_locked(vma, addr)) in lock_mm_and_find_vma()
6337 return vma; in lock_mm_and_find_vma()
6355 struct vm_area_struct *vma; in lock_vma_under_rcu() local
6359 vma = mas_walk(&mas); in lock_vma_under_rcu()
6360 if (!vma) in lock_vma_under_rcu()
6363 if (!vma_start_read(vma)) in lock_vma_under_rcu()
6367 if (vma->detached) { in lock_vma_under_rcu()
6368 vma_end_read(vma); in lock_vma_under_rcu()
6381 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in lock_vma_under_rcu()
6385 return vma; in lock_vma_under_rcu()
6388 vma_end_read(vma); in lock_vma_under_rcu()
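
handle_mm_fault(), lock_vma_under_rcu() and lock_mm_and_find_vma() above form the interface architecture page-fault handlers use: try the per-VMA read lock first, fall back to the mmap_lock path. A simplified, non-authoritative sketch of that pattern (modelled on arch/x86/mm/fault.c; retry accounting, signal handling and config dependencies are omitted):

#include <linux/mm.h>
#include <linux/ptrace.h>

static vm_fault_t fault_path_sketch(struct mm_struct *mm, unsigned long addr,
                                    unsigned int flags, struct pt_regs *regs)
{
        struct vm_area_struct *vma;
        vm_fault_t fault;

        /* Fast path: per-VMA read lock, no mmap_lock taken. */
        vma = lock_vma_under_rcu(mm, addr);
        if (vma) {
                fault = handle_mm_fault(vma, addr,
                                        flags | FAULT_FLAG_VMA_LOCK, regs);
                if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
                        vma_end_read(vma);
                if (!(fault & VM_FAULT_RETRY))
                        return fault;
                /* otherwise fall back to the mmap_lock path */
        }

        /* Slow path: take mmap_lock, find (and possibly expand) the VMA. */
        vma = lock_mm_and_find_vma(mm, addr, regs);
        if (!vma)
                return VM_FAULT_SIGSEGV;        /* helper dropped mmap_lock */

        fault = handle_mm_fault(vma, addr, flags, regs);
        if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
                mmap_read_unlock(mm);
        return fault;
}
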
6481 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma) in pfnmap_lockdep_assert() argument
6484 struct file *file = vma->vm_file; in pfnmap_lockdep_assert()
6489 lockdep_is_held(&vma->vm_mm->mmap_lock)); in pfnmap_lockdep_assert()
6491 lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock)); in pfnmap_lockdep_assert()
6528 struct vm_area_struct *vma = args->vma; in follow_pfnmap_start() local
6530 struct mm_struct *mm = vma->vm_mm; in follow_pfnmap_start()
6538 pfnmap_lockdep_assert(vma); in follow_pfnmap_start()
6540 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in follow_pfnmap_start()
6543 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfnmap_start()
6631 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
6640 struct follow_pfnmap_args args = { .vma = vma, .address = addr }; in generic_access_phys()
6705 struct vm_area_struct *vma = NULL; in __access_remote_vm() local
6707 gup_flags, &vma); in __access_remote_vm()
6711 vma = vma_lookup(mm, addr); in __access_remote_vm()
6712 if (!vma) { in __access_remote_vm()
6713 vma = expand_stack(mm, addr); in __access_remote_vm()
6716 if (!vma) in __access_remote_vm()
6729 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
6730 bytes = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
6743 copy_to_user_page(vma, page, addr, in __access_remote_vm()
6747 copy_from_user_page(vma, page, addr, in __access_remote_vm()
6808 struct vm_area_struct *vma; in print_vma_addr() local
6816 vma = vma_lookup(mm, ip); in print_vma_addr()
6817 if (vma && vma->vm_file) { in print_vma_addr()
6818 struct file *f = vma->vm_file; in print_vma_addr()
6819 ip -= vma->vm_start; in print_vma_addr()
6820 ip += vma->vm_pgoff << PAGE_SHIFT; in print_vma_addr()
6822 vma->vm_start, in print_vma_addr()
6823 vma->vm_end - vma->vm_start); in print_vma_addr()
6939 struct vm_area_struct *vma, in copy_user_gigantic_page() argument
6953 addr + i*PAGE_SIZE, vma)) in copy_user_gigantic_page()
6962 struct vm_area_struct *vma; member
6971 if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma)) in copy_subpage()
6977 unsigned long addr_hint, struct vm_area_struct *vma) in copy_user_large_folio() argument
6983 .vma = vma, in copy_user_large_folio()
6987 return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages); in copy_user_large_folio()
7052 void vma_pgtable_walk_begin(struct vm_area_struct *vma) in vma_pgtable_walk_begin() argument
7054 if (is_vm_hugetlb_page(vma)) in vma_pgtable_walk_begin()
7055 hugetlb_vma_lock_read(vma); in vma_pgtable_walk_begin()
7058 void vma_pgtable_walk_end(struct vm_area_struct *vma) in vma_pgtable_walk_end() argument
7060 if (is_vm_hugetlb_page(vma)) in vma_pgtable_walk_end()
7061 hugetlb_vma_unlock_read(vma); in vma_pgtable_walk_end()