Lines matching refs: vma
98 #define vma_policy(vma) NULL argument
362 int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
381 int (*access)(struct vm_area_struct *vma, unsigned long addr,
387 const char *(*name)(struct vm_area_struct *vma);
397 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
409 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
417 struct page *(*find_special_page)(struct vm_area_struct *vma,
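
The six hooks above are the per-VMA callbacks a driver installs through vma->vm_ops. A minimal stand-alone sketch of the pattern, assuming nothing beyond what the listing shows: vm_operations_struct is reduced to a single hook, and demo_vma_name/demo_vm_ops are made up for illustration.

#include <stddef.h>
#include <stdio.h>

struct vm_area_struct;	/* opaque in this sketch */

struct vm_operations_struct {
	const char *(*name)(struct vm_area_struct *vma);
};

static const char *demo_vma_name(struct vm_area_struct *vma)
{
	(void)vma;
	return "[demo mapping]";
}

static const struct vm_operations_struct demo_vm_ops = {
	.name = demo_vma_name,
};

int main(void)
{
	/* A real .mmap() handler would set vma->vm_ops = &demo_vm_ops;
	 * the kernel then dispatches through the table, e.g. when it
	 * prints /proc/<pid>/maps. */
	printf("%s\n", demo_vm_ops.name(NULL));
	return 0;
}
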
453 static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma) in vma_is_shared_maywrite() argument
455 return is_shared_maywrite(vma->vm_flags); in vma_is_shared_maywrite()
467 static inline bool vma_lock_alloc(struct vm_area_struct *vma) in vma_lock_alloc() argument
469 vma->vm_lock = calloc(1, sizeof(struct vma_lock)); in vma_lock_alloc()
471 if (!vma->vm_lock) in vma_lock_alloc()
474 init_rwsem(&vma->vm_lock->lock); in vma_lock_alloc()
475 vma->vm_lock_seq = UINT_MAX; in vma_lock_alloc()
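
Only lines referencing the identifier vma are listed, so vma_lock_alloc()'s return statements fall outside the match. A plausible completion: the two returns are inferred, and init_rwsem(), struct vma_lock and struct vm_area_struct are reduced to stand-ins so the sketch compiles on its own.

#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>

struct rw_semaphore { int dummy; };
static void init_rwsem(struct rw_semaphore *sem) { sem->dummy = 0; }

struct vma_lock { struct rw_semaphore lock; };

struct vm_area_struct {
	struct vma_lock *vm_lock;
	unsigned int vm_lock_seq;
};

static bool vma_lock_alloc(struct vm_area_struct *vma)
{
	vma->vm_lock = calloc(1, sizeof(struct vma_lock));
	if (!vma->vm_lock)
		return false;		/* inferred error path */

	init_rwsem(&vma->vm_lock->lock);
	vma->vm_lock_seq = UINT_MAX;	/* presumably "not write-locked" */
	return true;			/* inferred success path */
}
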
481 static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) in vma_mark_detached() argument
485 vma_assert_write_locked(vma); in vma_mark_detached()
486 vma->detached = detached; in vma_mark_detached()
493 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) in vma_init() argument
495 memset(vma, 0, sizeof(*vma)); in vma_init()
496 vma->vm_mm = mm; in vma_init()
497 vma->vm_ops = &vma_dummy_vm_ops; in vma_init()
498 INIT_LIST_HEAD(&vma->anon_vma_chain); in vma_init()
499 vma_mark_detached(vma, false); in vma_init()
504 struct vm_area_struct *vma = calloc(1, sizeof(struct vm_area_struct)); in vm_area_alloc() local
506 if (!vma) in vm_area_alloc()
509 vma_init(vma, mm); in vm_area_alloc()
510 if (!vma_lock_alloc(vma)) { in vm_area_alloc()
511 free(vma); in vm_area_alloc()
515 return vma; in vm_area_alloc()
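
Likewise for vm_area_alloc(): both elided error paths presumably return NULL. A self-contained sketch, with vma_init() and vma_lock_alloc() collapsed to minimal stand-ins.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct mm_struct { int dummy; };

struct vm_area_struct {
	struct mm_struct *vm_mm;
	void *vm_lock;
};

static void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;	/* vm_ops/anon_vma_chain setup elided */
}

static bool vma_lock_alloc(struct vm_area_struct *vma)
{
	vma->vm_lock = calloc(1, sizeof(int));	/* stand-in lock object */
	return vma->vm_lock != NULL;
}

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma = calloc(1, sizeof(*vma));

	if (!vma)
		return NULL;		/* inferred */

	vma_init(vma, mm);
	if (!vma_lock_alloc(vma)) {
		free(vma);
		return NULL;		/* inferred */
	}

	return vma;
}
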
579 static inline void vma_set_range(struct vm_area_struct *vma, in vma_set_range() argument
583 vma->vm_start = start; in vma_set_range()
584 vma->vm_end = end; in vma_set_range()
585 vma->vm_pgoff = pgoff; in vma_set_range()
632 static inline bool vma_is_anonymous(struct vm_area_struct *vma) in vma_is_anonymous() argument
634 return !vma->vm_ops; in vma_is_anonymous()
645 struct vm_area_struct *vma; in find_vma_prev() local
648 vma = vma_iter_load(&vmi); in find_vma_prev()
650 if (!vma) in find_vma_prev()
651 vma = vma_next(&vmi); in find_vma_prev()
652 return vma; in find_vma_prev()
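
find_vma_prev()'s iterator setup and its *pprev store reference vmi and pprev rather than vma, so they are elided here. A plausible reconstruction under the usual contract (return the VMA at or after addr, hand its predecessor back through *pprev); VMA_ITERATOR, vma_iter_load(), vma_prev() and vma_next() are reduced to do-nothing stubs purely so the sketch compiles.

#include <stddef.h>

struct mm_struct;
struct vm_area_struct;

struct vma_iterator { struct mm_struct *mm; unsigned long addr; };
#define VMA_ITERATOR(name, mm_, addr_) \
	struct vma_iterator name = { (mm_), (addr_) }

/* Stubs standing in for the kernel's maple-tree iterator helpers. */
static struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{ (void)vmi; return NULL; }
static struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{ (void)vmi; return NULL; }
static struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{ (void)vmi; return NULL; }

static inline struct vm_area_struct *find_vma_prev(struct mm_struct *mm,
		unsigned long addr, struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);	/* inferred elided line */
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}
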
665 static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) in anon_vma_name() argument
670 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, in is_mergeable_vm_userfaultfd_ctx() argument
686 static inline unsigned long vma_pages(struct vm_area_struct *vma) in vma_pages() argument
688 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in vma_pages()
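
vma_pages() is just the byte span shifted down by PAGE_SHIFT. A throwaway worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long vm_start = 0x2000, vm_end = 0x5000;

	/* (0x5000 - 0x2000) >> 12 == 3 pages */
	printf("%lu\n", (vm_end - vm_start) >> PAGE_SHIFT);
	return 0;
}
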
699 static inline void vma_lock_free(struct vm_area_struct *vma) in vma_lock_free() argument
701 free(vma->vm_lock); in vma_lock_free()
704 static inline void __vm_area_free(struct vm_area_struct *vma) in __vm_area_free() argument
706 vma_lock_free(vma); in __vm_area_free()
707 free(vma); in __vm_area_free()
710 static inline void vm_area_free(struct vm_area_struct *vma) in vm_area_free() argument
712 __vm_area_free(vma); in vm_area_free()
732 struct vm_area_struct *vma, unsigned long start_addr, in unmap_vmas() argument
738 (void)vma; in unmap_vmas()
746 struct vm_area_struct *vma, unsigned long floor, in free_pgtables() argument
751 (void)vma; in free_pgtables()
790 static inline void vma_start_write(struct vm_area_struct *vma) in vma_start_write() argument
793 vma->vm_lock_seq++; in vma_start_write()
796 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, in vma_adjust_trans_huge() argument
801 (void)vma; in vma_adjust_trans_huge()
850 static inline void uprobe_munmap(struct vm_area_struct *vma, in uprobe_munmap() argument
853 (void)vma; in uprobe_munmap()
870 static inline void unlink_anon_vmas(struct vm_area_struct *vma) in unlink_anon_vmas() argument
873 vma->anon_vma->was_unlinked = true; in unlink_anon_vmas()
889 static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, in userfaultfd_unmap_prep() argument
894 (void)vma; in userfaultfd_unmap_prep()
948 static inline void khugepaged_enter_vma(struct vm_area_struct *vma, in khugepaged_enter_vma() argument
951 (void)vma; in khugepaged_enter_vma()
1012 static inline void vm_flags_init(struct vm_area_struct *vma, in vm_flags_init() argument
1015 vma->__vm_flags = flags; in vm_flags_init()
1018 static inline void vm_flags_set(struct vm_area_struct *vma, in vm_flags_set() argument
1021 vma_start_write(vma); in vm_flags_set()
1022 vma->__vm_flags |= flags; in vm_flags_set()
1025 static inline void vm_flags_clear(struct vm_area_struct *vma, in vm_flags_clear() argument
1028 vma_start_write(vma); in vm_flags_clear()
1029 vma->__vm_flags &= ~flags; in vm_flags_clear()
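
Note the asymmetry above: vm_flags_set() and vm_flags_clear() take the VMA write lock via vma_start_write() before touching __vm_flags, while vm_flags_init() does not, since initialization happens before the VMA is visible to any reader. A minimal sketch of that discipline, with the lock reduced to the sequence bump seen in vma_start_write() earlier; the flag values are illustrative.

#include <stdio.h>

typedef unsigned long vm_flags_t;

struct vm_area_struct {
	unsigned int vm_lock_seq;
	vm_flags_t __vm_flags;
};

static void vma_start_write(struct vm_area_struct *vma)
{
	vma->vm_lock_seq++;	/* stands in for taking the write lock */
}

static void vm_flags_init(struct vm_area_struct *vma, vm_flags_t flags)
{
	vma->__vm_flags = flags;	/* VMA not yet visible: no lock */
}

static void vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
	vma_start_write(vma);		/* lock before every modification */
	vma->__vm_flags |= flags;
}

static void vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
	vma_start_write(vma);
	vma->__vm_flags &= ~flags;
}

#define VM_READ  0x1UL	/* illustrative values */
#define VM_WRITE 0x2UL

int main(void)
{
	struct vm_area_struct vma = { 0 };

	vm_flags_init(&vma, VM_READ);
	vm_flags_set(&vma, VM_WRITE);
	vm_flags_clear(&vma, VM_READ);
	printf("flags=%#lx seq=%u\n", vma.__vm_flags, vma.vm_lock_seq);
	return 0;	/* prints flags=0x2 seq=2 */
}
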
1042 static inline void vma_set_anonymous(struct vm_area_struct *vma) in vma_set_anonymous() argument
1044 vma->vm_ops = NULL; in vma_set_anonymous()
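
vma_set_anonymous() is the writer side of vma_is_anonymous() earlier in the listing: "anonymous" is encoded as "no vm_ops". A tiny self-contained demo of the convention, with struct vm_area_struct reduced to that one field.

#include <assert.h>
#include <stddef.h>

struct vm_area_struct { const void *vm_ops; };

static int vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

int main(void)
{
	struct vm_area_struct vma = { .vm_ops = (void *)1 };

	assert(!vma_is_anonymous(&vma));
	vma.vm_ops = NULL;		/* what vma_set_anonymous() does */
	assert(vma_is_anonymous(&vma));
	return 0;
}
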
1065 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1068 static inline void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
1070 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
1074 vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags)); in vma_set_page_prot()
1076 if (vma_wants_writenotify(vma, vm_page_prot)) { in vma_set_page_prot()
1082 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); in vma_set_page_prot()
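
vma_set_page_prot()'s elided branch body, inferred from mainline, drops VM_SHARED and recomputes the protection so that shared writable mappings fault on first write (write-notify); WRITE_ONCE publishes the result for lockless readers. All helpers below are toy stubs so the sketch compiles alone.

#include <stdbool.h>

typedef struct { unsigned long pgprot; } pgprot_t;

#define VM_SHARED 0x8UL				/* illustrative value */
#define WRITE_ONCE(x, val) ((x) = (val))	/* single-threaded stand-in */

struct vm_area_struct {
	unsigned long vm_flags;
	pgprot_t vm_page_prot;
};

static pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return (pgprot_t){ vm_flags & 0xfUL };	/* toy flag-to-prot map */
}

static pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	(void)oldprot;				/* toy: take the new bits */
	return newprot;
}

static bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t prot)
{
	(void)prot;
	return vma->vm_flags & VM_SHARED;	/* toy policy */
}

static void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;			/* elided declaration */

	vm_page_prot = pgprot_modify(vma->vm_page_prot,
				     vm_get_page_prot(vm_flags));
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;		/* inferred branch body */
		vm_page_prot = pgprot_modify(vm_page_prot,
					     vm_get_page_prot(vm_flags));
	}
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}
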
1099 static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma) in stack_guard_start_gap() argument
1101 if (vma->vm_flags & VM_GROWSDOWN) in stack_guard_start_gap()
1105 if (vma->vm_flags & VM_SHADOW_STACK) in stack_guard_start_gap()
1111 static inline unsigned long vm_start_gap(struct vm_area_struct *vma) in vm_start_gap() argument
1113 unsigned long gap = stack_guard_start_gap(vma); in vm_start_gap()
1114 unsigned long vm_start = vma->vm_start; in vm_start_gap()
1117 if (vm_start > vma->vm_start) in vm_start_gap()
1122 static inline unsigned long vm_end_gap(struct vm_area_struct *vma) in vm_end_gap() argument
1124 unsigned long vm_end = vma->vm_end; in vm_end_gap()
1126 if (vma->vm_flags & VM_GROWSUP) { in vm_end_gap()
1128 if (vm_end < vma->vm_end) in vm_end_gap()
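
The elided lines are the interesting ones here: applying the guard gap and clamping on unsigned wraparound. A plausible completion, inferred from mainline (vm_start -= gap, clamp to 0 on underflow; vm_end += stack_guard_gap, clamp near the address-space top on overflow); the flag values and PAGE_SIZE are illustrative.

#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumption: 4 KiB pages */
#define VM_GROWSDOWN	0x0100UL	/* illustrative flag values */
#define VM_GROWSUP	0x0200UL
#define VM_SHADOW_STACK	0x0400UL

static unsigned long stack_guard_gap = 256UL << 12;	/* 256 pages */

struct vm_area_struct {
	unsigned long vm_start, vm_end, vm_flags;
};

static unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;
	return 0;
}

static unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;			/* inferred */
	if (vm_start > vma->vm_start)		/* unsigned underflow? */
		vm_start = 0;			/* inferred clamp */
	return vm_start;
}

static unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;	/* inferred */
		if (vm_end < vma->vm_end)	/* unsigned overflow? */
			vm_end = -PAGE_SIZE;	/* inferred clamp */
	}
	return vm_end;
}

int main(void)
{
	struct vm_area_struct stack = {
		.vm_start = 512UL << 10, .vm_end = 2UL << 20,
		.vm_flags = VM_GROWSDOWN,
	};

	(void)vm_end_gap(&stack);
	/* Guard gap exceeds vm_start: the subtraction would wrap, so the
	 * result clamps to 0 instead. */
	printf("%#lx\n", vm_start_gap(&stack));	/* prints 0 */
	return 0;
}
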
1140 static inline bool vma_is_accessible(struct vm_area_struct *vma) in vma_is_accessible() argument
1142 return vma->vm_flags & VM_ACCESS_FLAGS; in vma_is_accessible()
1167 static inline int __anon_vma_prepare(struct vm_area_struct *vma) in __anon_vma_prepare() argument
1175 vma->anon_vma = anon_vma; in __anon_vma_prepare()
1180 static inline int anon_vma_prepare(struct vm_area_struct *vma) in anon_vma_prepare() argument
1182 if (likely(vma->anon_vma)) in anon_vma_prepare()
1185 return __anon_vma_prepare(vma); in anon_vma_prepare()
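
anon_vma_prepare() is the classic lazy-init fast path: return immediately if vma->anon_vma is already set, otherwise fall back to __anon_vma_prepare(), whose elided lines presumably allocate the anon_vma and report -ENOMEM on failure. A reduced, self-contained sketch:

#include <stdlib.h>

#define ENOMEM 12
#define likely(x) (x)		/* stand-in for the kernel branch hint */

struct anon_vma { int dummy; };
struct vm_area_struct { struct anon_vma *anon_vma; };

static int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = calloc(1, sizeof(*anon_vma));

	if (!anon_vma)
		return -ENOMEM;	/* inferred error path */

	vma->anon_vma = anon_vma;
	return 0;		/* inferred */
}

static int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;	/* fast path: already prepared */

	return __anon_vma_prepare(vma);
}
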