Lines Matching refs:vma
/* struct members: */
struct vm_area_struct *vma;
struct vm_area_struct *vma;		/* The first vma to munmap */
struct vm_area_struct *vma;		/* Either a new VMA or the one being modified. */
/* Convert an address within a VMA into the pgoff of its backing object. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
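vma_pgoff_offset() adds the page distance of addr from the VMA's start to vm_pgoff, the page offset at which the mapping begins in its backing object. A minimal usage sketch; the helper name and fault-handler framing below are illustrative, not taken from this header:

/* Illustrative helper (not from this header): which page of the backing
 * object does a faulting address land on? Assumes addr lies within vma. */
static pgoff_t fault_pgoff(struct vm_area_struct *vma, unsigned long addr)
{
	return vma_pgoff_offset(vma, addr);
}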
/* field initializer inside a macro definition (note the line continuation): */
	.vma = vma_,					\
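The trailing backslash and the vma_ parameter suggest a macro that builds a struct with designated initializers. A generic sketch of that pattern; the macro and struct names here are illustrative, not this header's:

/* Sketch of the pattern only; EXAMPLE_STATE and struct example_state
 * are illustrative names, not taken from this header. */
#define EXAMPLE_STATE(name, vma_)			\
	struct example_state name = {			\
		.vma = vma_,				\
	}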
/* parameter line matched inside another multi-line declaration: */
		struct vm_area_struct *vma,

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
				     struct vm_area_struct *vma, gfp_t gfp)
{
	/* Reset an iterator whose position no longer covers vm_start. */
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	/* Store vma over its whole [vm_start, vm_end) span in the maple tree. */
	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
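Unlike vma_iter_store() further down, this variant can fail: mas_store_gfp() may have to allocate maple-tree nodes with the supplied gfp mask. A hedged caller sketch; the locking and variables come from an assumed surrounding context, not from this header:

/* Sketch: update the tree entry for a VMA; assumes mmap_lock is held
 * for write and vmi/vma come from the surrounding (assumed) context. */
if (vma_iter_store_gfp(&vmi, vma, GFP_KERNEL))
	return -ENOMEM;		/* maple tree could not allocate nodes */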
int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		  struct vm_area_struct *prev, struct vm_area_struct *next);
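do_vmi_align_munmap() expects a page-aligned range and an iterator able to find the first overlapping VMA. A sketch of the do_vmi_munmap()-style caller; simplified, with the real path's lock handling omitted:

/* Sketch: unmap the aligned range [start, end); assumes mmap_lock is
 * held for write and start/end are page aligned. */
VMA_ITERATOR(vmi, mm, start);

vma = vma_find(&vmi, end);
if (!vma)
	return 0;	/* nothing mapped in the range */

return do_vmi_align_munmap(&vmi, vma, mm, start, end, uf, unlock);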
/* The matched parameters belong to the vma_modify_*() / vma_merge_*() family: */
struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);
struct vm_area_struct *vma_modify_flags_name(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags, struct anon_vma_name *new_name);
struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct mempolicy *new_pol);
struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags, struct vm_userfaultfd_ctx new_ctx);
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
		struct vm_area_struct *vma, unsigned long delta);
void unlink_file_vma_batch_add(struct unlink_vma_file_batch *,
			       struct vm_area_struct *vma);
void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);
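vma_link() inserts an already-initialized VMA into the mm's maple tree and, for file-backed mappings, into the file's rmap; it fails if tree nodes cannot be allocated. A hedged sketch of the insertion pattern; the error path shown is illustrative:

/* Sketch: link a freshly constructed VMA into the address space.
 * Assumes mmap_lock is held for write; error handling is illustrative. */
if (vma_link(mm, vma)) {
	vm_area_free(vma);
	return -ENOMEM;
}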
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * Shared mappings defer to the writenotify machinery; private
	 * mappings can have individual PTEs made writable whenever the
	 * VMA itself is writable, as COW has already been handled for
	 * any PTE that is present.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}
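In the mprotect path this predicate decides whether change_protection() should try to make individual PTEs writable immediately rather than leaving them read-only until the next write fault. A sketch mirroring that use; the surrounding variables are assumed:

/* Sketch of the mprotect_fixup()-style use; vma comes from the
 * surrounding (assumed) context. */
unsigned long mm_cp_flags = 0;

if (vma_wants_manual_pte_write_upgrade(vma))
	mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;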
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
				    struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}
/* Store a VMA using memory preallocated by vma_iter_prealloc(). */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Debug builds warn when the iterator does not cover vm_start. */
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	/* As in vma_iter_store_gfp(): reset a stale iterator position. */
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}
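vma_iter_store() must not fail, so node allocation happens up front: callers pair it with vma_iter_prealloc() above. A hedged sketch of that pairing; locking and variables come from the assumed surrounding context:

/* Sketch: preallocate maple-tree nodes, then store infallibly.
 * Assumes mmap_lock is held for write. */
if (vma_iter_prealloc(&vmi, vma))
	return -ENOMEM;		/* nothing has been modified yet */

vma_iter_store(&vmi, vma);	/* consumes the preallocation */

If the operation is abandoned between the two calls, the preallocated nodes are released again via mas_destroy() on the iterator's ma_state.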
#ifdef CONFIG_64BIT
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check whether a VMA is sealed against modification.
 * Returns true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}
bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else	/* !CONFIG_64BIT: sealing unsupported, modification always allowed */

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif
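Callers on the munmap, mprotect, and madvise paths use these as an early bail-out, since mseal()ed mappings must refuse modification. A minimal caller sketch; the surrounding context is assumed:

/* Sketch: refuse to modify an mseal()ed mapping. */
if (!can_modify_vma(vma))
	return -EPERM;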
/* Stack expansion (grows-up and grows-down, respectively). */
#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);
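These implement stack growth in whichever direction the architecture uses. A hedged sketch of the grows-down case, roughly as a fault handler would trigger it; real callers also enforce guard gaps and rlimits before expanding:

/* Sketch: grow a VM_GROWSDOWN stack VMA to cover a faulting address.
 * Guard-gap and rlimit checks done by real callers are omitted here. */
if ((vma->vm_flags & VM_GROWSDOWN) && addr < vma->vm_start) {
	if (expand_downwards(vma, addr))
		return -ENOMEM;
}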