Lines matching refs:vma in mm/mprotect.c. Each entry is the source line number, the matching line, and the enclosing function; "argument" and "local" mark whether vma is bound as a parameter or a local variable at that site.

43 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,  in can_change_pte_writable()  argument
48 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) in can_change_pte_writable()
56 if (pte_needs_soft_dirty_wp(vma, pte)) in can_change_pte_writable()
60 if (userfaultfd_pte_wp(vma, pte)) in can_change_pte_writable()
63 if (!(vma->vm_flags & VM_SHARED)) { in can_change_pte_writable()
70 page = vm_normal_page(vma, addr, pte); in can_change_pte_writable()
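
The block above is the whole decision of whether mprotect() may upgrade a read-only PTE to writable without waiting for a write fault: VM_WRITE must be set (line 48), soft-dirty tracking (line 56) and uffd-wp (line 60) must not demand a fault, and a private mapping (line 63) additionally needs the PTE to point at an exclusively owned anonymous page (line 70). For shared mappings the kernel further requires a dirty PTE so filesystems still see writenotify faults. A minimal userspace model of those checks, with stand-in types and flag bits rather than the kernel's:

    #include <stdbool.h>

    #define VM_WRITE  0x1UL
    #define VM_SHARED 0x2UL

    struct model_vma { unsigned long vm_flags; };
    struct model_pte {
        bool soft_dirty_wp;   /* pte_needs_soft_dirty_wp() would say "fault" */
        bool uffd_wp;         /* userfaultfd_pte_wp() would say "fault" */
        bool anon_exclusive;  /* page is exclusively owned anonymous memory */
        bool dirty;           /* pte_dirty() */
    };

    static bool model_can_change_pte_writable(const struct model_vma *vma,
                                              const struct model_pte *pte)
    {
        if (!(vma->vm_flags & VM_WRITE))
            return false;                /* never writable without VM_WRITE */
        if (pte->soft_dirty_wp)
            return false;                /* soft-dirty must observe the fault */
        if (pte->uffd_wp)
            return false;                /* uffd-wp must observe the fault */
        if (!(vma->vm_flags & VM_SHARED))
            return pte->anon_exclusive;  /* private: exclusive anon pages only */
        return pte->dirty;               /* shared: writenotify needs a fault
                                            unless the PTE is already dirty */
    }
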
87 struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, in change_pte_range() argument
99 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
104 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
105 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
108 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
128 folio = vm_normal_folio(vma, addr, oldpte); in change_pte_range()
134 if (is_cow_mapping(vma->vm_flags) && in change_pte_range()
169 oldpte = ptep_modify_prot_start(vma, addr, pte); in change_pte_range()
192 can_change_pte_writable(vma, addr, ptent)) in change_pte_range()
193 ptent = pte_mkwrite(ptent, vma); in change_pte_range()
195 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); in change_pte_range()
251 pte_clear(vma->vm_mm, addr, pte); in change_pte_range()
265 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
279 if (userfaultfd_wp_use_markers(vma)) { in change_pte_range()
286 set_pte_at(vma->vm_mm, addr, pte, in change_pte_range()
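
change_pte_range() is the leaf of the walk: it maps and locks the PTE page (line 99), records the local NUMA node for private single-user mappings so NUMA-hinting updates can skip already-local pages (lines 104-105), flushes any pending batched TLB entries (line 108), and rewrites each present PTE with the ptep_modify_prot_start()/ptep_modify_prot_commit() pair (lines 169, 195) so the hardware never observes a half-updated entry; non-present entries and uffd-wp markers go through pte_clear()/set_pte_at() instead (lines 251, 265, 286). A reduced, self-contained model of the start/commit loop (all names here are stand-ins, not the kernel's):

    #include <stddef.h>

    struct model_pte { unsigned long bits; };

    /* Stand-in for ptep_modify_prot_start(): take the old value out of
     * the live table (the kernel does an atomic get-and-clear on most
     * architectures) so hardware updates cannot race with ours. */
    static struct model_pte modify_prot_start(struct model_pte *ptep)
    {
        struct model_pte old = *ptep;
        ptep->bits = 0;
        return old;
    }

    /* Stand-in for ptep_modify_prot_commit(): install the new value. */
    static void modify_prot_commit(struct model_pte *ptep,
                                   struct model_pte newpte)
    {
        *ptep = newpte;
    }

    static void model_change_pte_range(struct model_pte *pte, size_t n,
                                       unsigned long set_bits)
    {
        /* kernel: pte_offset_map_lock() takes the page-table lock here */
        for (size_t i = 0; i < n; i++) {
            struct model_pte old = modify_prot_start(&pte[i]);
            old.bits |= set_bits;  /* kernel: pte_modify(), pte_mkwrite() */
            modify_prot_commit(&pte[i], old);
        }
        /* kernel: pte_unmap_unlock() drops the lock */
    }
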
303 pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags) in pgtable_split_needed() argument
311 return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma); in pgtable_split_needed()
319 pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags) in pgtable_populate_needed() argument
326 return userfaultfd_wp_use_markers(vma); in pgtable_populate_needed()
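
pgtable_split_needed() (line 311) and pgtable_populate_needed() (line 326) exist for the same reason: on file-backed VMAs, uffd-wp protection state is stored in the page table itself as PTE markers, so a write-protect operation must be able to reach PTE granularity, splitting huge entries and allocating missing tables just to have somewhere to write the marker. Both decisions reduce to roughly this predicate (stand-in names, not the kernel's):

    #include <stdbool.h>

    #define MODEL_CP_UFFD_WP 0x1UL  /* stand-in for MM_CP_UFFD_WP */

    /* True when the operation must reach individual PTEs: uffd-wp on a
     * file-backed (non-anonymous) VMA leaves markers at the PTE level. */
    static bool model_needs_pte_level(unsigned long cp_flags, bool vma_is_anon)
    {
        return (cp_flags & MODEL_CP_UFFD_WP) && !vma_is_anon;
    }
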
335 #define change_pmd_prepare(vma, pmd, cp_flags) \ argument
338 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
339 if (pte_alloc(vma->vm_mm, pmd)) \
350 #define change_prepare(vma, high, low, addr, cp_flags) \ argument
353 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
354 low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
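
The change_pmd_prepare() and change_prepare() macros (lines 335 and 350) implement allocate-on-demand: a protection change normally just skips holes in the page tables, but when markers must be installed (the pgtable_populate_needed() test on lines 338 and 353) the missing level is allocated first. A sketch of the idea, assuming 512-entry tables as on x86-64:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdlib.h>

    /* One slot in a parent table points at a child table, or is empty. */
    static int model_prepare(void **slot, bool populate_needed)
    {
        if (!populate_needed || *slot != NULL)
            return 0;                         /* a hole can simply be skipped */
        *slot = calloc(512, sizeof(void *));  /* kernel: pte_alloc() or
                                                 p4d/pud/pmd_alloc() */
        return *slot ? 0 : -ENOMEM;
    }
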
362 struct vm_area_struct *vma, pud_t *pud, unsigned long addr, in change_pmd_range() argument
377 ret = change_pmd_prepare(vma, pmd, cp_flags); in change_pmd_range()
389 pgtable_split_needed(vma, cp_flags)) { in change_pmd_range()
390 __split_huge_pmd(vma, pmd, addr, false, NULL); in change_pmd_range()
396 ret = change_pmd_prepare(vma, pmd, cp_flags); in change_pmd_range()
402 ret = change_huge_pmd(tlb, vma, pmd, in change_pmd_range()
417 ret = change_pte_range(tlb, vma, pmd, addr, next, newprot, in change_pmd_range()
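
When change_pmd_range() meets a huge PMD it has two options: split it down to PTEs (line 390) when the range does not cover the whole huge page or PTE granularity is required (line 389), or change protection on the single huge entry in place via change_huge_pmd() (line 402). The control flow, reduced to a model with stand-in types (the real function also re-runs change_pmd_prepare() after a split, line 396, and handles migration entries):

    struct model_pmd { bool is_huge; };

    static long model_change_pmd(struct model_pmd *pmd, bool need_pte_level,
                                 long (*change_huge)(struct model_pmd *),
                                 void (*split)(struct model_pmd *),
                                 long (*change_ptes)(struct model_pmd *))
    {
        if (pmd->is_huge) {
            if (need_pte_level) {
                split(pmd);                    /* __split_huge_pmd() */
            } else {
                long done = change_huge(pmd);  /* change_huge_pmd() */
                if (done)
                    return done;               /* handled as one huge entry */
                /* the huge PMD was split underneath us: fall through */
            }
        }
        return change_ptes(pmd);               /* change_pte_range() */
    }
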
432 struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, in change_pud_range() argument
446 ret = change_prepare(vma, pudp, pmd, addr, cp_flags); in change_pud_range()
459 vma->vm_mm, addr, end); in change_pud_range()
465 pgtable_split_needed(vma, cp_flags)) { in change_pud_range()
466 __split_huge_pud(vma, pudp, addr); in change_pud_range()
469 ret = change_huge_pud(tlb, vma, pudp, in change_pud_range()
480 pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot, in change_pud_range()
491 struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, in change_p4d_range() argument
501 ret = change_prepare(vma, p4d, pud, addr, cp_flags); in change_p4d_range()
506 pages += change_pud_range(tlb, vma, p4d, addr, next, newprot, in change_p4d_range()
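
change_pmd_range(), change_pud_range() and change_p4d_range() (lines 362, 432, 491) are the same loop instantiated at three page-table levels: compute how far the current entry's coverage extends, recurse one level down for [addr, next), advance. The boundary computation is the *_addr_end() family, which in userspace terms is:

    /* Next level-aligned boundary, clamped to the end of the range; the
     * -1s keep the comparison correct if the boundary wraps to 0 at the
     * very top of the address space, as in the kernel's pgd_addr_end(). */
    static unsigned long level_addr_end(unsigned long addr, unsigned long end,
                                        unsigned long level_size)
    {
        unsigned long next = (addr + level_size) & ~(level_size - 1);
        return (next - 1 < end - 1) ? next : end;
    }
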
514 struct vm_area_struct *vma, unsigned long addr, in change_protection_range() argument
517 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
524 tlb_start_vma(tlb, vma); in change_protection_range()
527 ret = change_prepare(vma, pgd, p4d, addr, cp_flags); in change_protection_range()
534 pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot, in change_protection_range()
538 tlb_end_vma(tlb, vma); in change_protection_range()
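
change_protection_range() (line 514) drives that walk from the top and brackets it with tlb_start_vma()/tlb_end_vma() (lines 524, 538) so TLB invalidations are batched rather than issued per PTE. The shape of the loop, using the level_addr_end() model above (PGDIR_SIZE_MODEL is a stand-in; 512 GiB on 4-level x86-64):

    #define PGDIR_SIZE_MODEL (1UL << 39)  /* stand-in for PGDIR_SIZE */

    static long model_change_protection_range(unsigned long addr,
                                              unsigned long end,
                                              long (*lower_level)(unsigned long,
                                                                  unsigned long))
    {
        long pages = 0;
        unsigned long next;

        /* kernel: tlb_start_vma() opens the TLB-flush batch */
        do {
            next = level_addr_end(addr, end, PGDIR_SIZE_MODEL);
            pages += lower_level(addr, next);  /* kernel: change_p4d_range() */
        } while (addr = next, addr != end);
        /* kernel: tlb_end_vma() issues the batched invalidations */

        return pages;
    }
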
544 struct vm_area_struct *vma, unsigned long start, in change_protection() argument
547 pgprot_t newprot = vma->vm_page_prot; in change_protection()
564 if (is_vm_hugetlb_page(vma)) in change_protection()
565 pages = hugetlb_change_protection(vma, start, end, newprot, in change_protection()
568 pages = change_protection_range(tlb, vma, start, end, newprot, in change_protection()
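
change_protection() (line 544) takes the new protection from vma->vm_page_prot (line 547) and is otherwise a dispatcher: hugetlb VMAs keep their page tables in a different layout, so they go through hugetlb_change_protection() (line 565) while everything else takes the generic walk (line 568). As a model:

    static long model_change_protection(bool is_hugetlb,
                                        long (*hugetlb_walk)(void),
                                        long (*generic_walk)(void))
    {
        return is_hugetlb ? hugetlb_walk()   /* hugetlb_change_protection() */
                          : generic_walk();  /* change_protection_range() */
    }
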
606 struct vm_area_struct *vma, struct vm_area_struct **pprev, in mprotect_fixup() argument
609 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
610 unsigned long oldflags = vma->vm_flags; in mprotect_fixup()
616 if (!can_modify_vma(vma)) in mprotect_fixup()
620 *pprev = vma; in mprotect_fixup()
630 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in mprotect_fixup()
661 } else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) && in mprotect_fixup()
662 !vma->anon_vma) { in mprotect_fixup()
666 vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags); in mprotect_fixup()
667 if (IS_ERR(vma)) { in mprotect_fixup()
668 error = PTR_ERR(vma); in mprotect_fixup()
672 *pprev = vma; in mprotect_fixup()
678 vma_start_write(vma); in mprotect_fixup()
679 vm_flags_reset(vma, newflags); in mprotect_fixup()
680 if (vma_wants_manual_pte_write_upgrade(vma)) in mprotect_fixup()
682 vma_set_page_prot(vma); in mprotect_fixup()
684 change_protection(tlb, vma, start, end, mm_cp_flags); in mprotect_fixup()
695 populate_vma_page_range(vma, start, end, NULL); in mprotect_fixup()
700 perf_event_mmap(vma); in mprotect_fixup()
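
mprotect_fixup() refuses sealed VMAs (line 616), merges or splits the VMA to fit the range (line 666), installs the new flags and page protection (lines 679, 682), and only then walks the page tables (line 684). Line 680 is where the write-upgrade is requested, letting can_change_pte_writable() (above) make PTEs writable immediately instead of waiting for a write fault. The effect is observable from userspace; a demo, assuming a 4 KiB page size and a kernel new enough to have the write-upgrade path (the fault delta should then be 0, while older kernels report 1):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/resource.h>

    static long minflt(void)
    {
        struct rusage ru;
        getrusage(RUSAGE_SELF, &ru);
        return ru.ru_minflt;
    }

    int main(void)
    {
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        p[0] = 1;                      /* fault in an exclusive anon page */
        mprotect(p, 4096, PROT_READ);  /* downgrade: PTE becomes read-only */
        mprotect(p, 4096, PROT_READ | PROT_WRITE);  /* upgrade */

        long before = minflt();
        p[0] = 2;                      /* no fault if the PTE came back writable */
        long delta = minflt() - before;

        printf("minor faults for the second write: %ld\n", delta);
        munmap(p, 4096);
        return 0;
    }
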
715 struct vm_area_struct *vma, *prev; in do_mprotect_pkey() local
754 vma = vma_find(&vmi, end); in do_mprotect_pkey()
756 if (!vma) in do_mprotect_pkey()
760 if (vma->vm_start >= end) in do_mprotect_pkey()
762 start = vma->vm_start; in do_mprotect_pkey()
764 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_mprotect_pkey()
767 if (vma->vm_start > start) in do_mprotect_pkey()
770 end = vma->vm_end; in do_mprotect_pkey()
772 if (!(vma->vm_flags & VM_GROWSUP)) in do_mprotect_pkey()
778 if (start > vma->vm_start) in do_mprotect_pkey()
779 prev = vma; in do_mprotect_pkey()
783 tmp = vma->vm_start; in do_mprotect_pkey()
784 for_each_vma_range(vmi, vma, end) { in do_mprotect_pkey()
789 if (vma->vm_start != tmp) { in do_mprotect_pkey()
795 if (rier && (vma->vm_flags & VM_MAYEXEC)) in do_mprotect_pkey()
805 new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey); in do_mprotect_pkey()
807 newflags |= (vma->vm_flags & ~mask_off_old_flags); in do_mprotect_pkey()
815 if (map_deny_write_exec(vma->vm_flags, newflags)) { in do_mprotect_pkey()
826 error = security_file_mprotect(vma, reqprot, prot); in do_mprotect_pkey()
830 tmp = vma->vm_end; in do_mprotect_pkey()
834 if (vma->vm_ops && vma->vm_ops->mprotect) { in do_mprotect_pkey()
835 error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags); in do_mprotect_pkey()
840 error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
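
do_mprotect_pkey() locates the first VMA (line 754), handles the VM_GROWSDOWN/VM_GROWSUP edge cases (lines 764, 772), then applies mprotect_fixup() to each VMA in turn (line 840), failing with -ENOMEM when for_each_vma_range() exposes a gap (the vma->vm_start != tmp check on line 789). The gap check is easy to see from userspace:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        munmap(p + pg, pg);  /* punch a hole in the middle page */

        if (mprotect(p, 3 * pg, PROT_READ) == -1 && errno == ENOMEM)
            puts("ENOMEM: the range crosses an unmapped gap");
        return 0;
    }
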