/linux-6.14.4/drivers/gpu/drm/i915/

i915_vma.h:

    static inline bool i915_vma_is_active(const struct i915_vma *vma)
    {
        return !i915_active_is_idle(&vma->active);
    }

    int __must_check _i915_vma_move_to_active(struct i915_vma *vma, …

    i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, …)
    {
        return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
    }

    static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
    {
        return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
    }

    static inline bool i915_vma_is_dpt(const struct i915_vma *vma)
    {
        return i915_is_dpt(vma->vm);
    }

    static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
    [all …]
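The i915_vma_is_ggtt() helper above is an instance of a common kernel idiom: a cheap inline predicate that tests one bit of a per-object flags word. A minimal sketch of the same pattern — struct demo_vma and DEMO_VMA_GGTT_BIT are hypothetical stand-ins, not the real i915 types:

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define DEMO_VMA_GGTT_BIT   0   /* bit index, not a mask */

    struct demo_vma {
        unsigned long flags;
    };

    /* test_bit() reads a single bit without tearing */
    static inline bool demo_vma_is_ggtt(const struct demo_vma *vma)
    {
        return test_bit(DEMO_VMA_GGTT_BIT, &vma->flags);
    }

Keeping these predicates inline keeps the flag test as cheap as an open-coded bit mask while hiding the flag layout from callers.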
i915_vma.c:

    static inline void assert_vma_held_evict(const struct i915_vma *vma)
    {
        …
        if (kref_read(&vma->vm->ref))
            assert_object_held_shared(vma->obj);
    }

    static void i915_vma_free(struct i915_vma *vma)
    {
        return kmem_cache_free(slab_vmas, vma);
    }

    static void vma_print_allocator(struct i915_vma *vma, const char *reason)
    {
        …
        if (!vma->node.stack) {
            drm_dbg(vma->obj->base.dev,
                …
                vma->node.start, vma->node.size, reason);
            …
        }
        …
        stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
    [all …]
i915_gem_evict.c:

    static bool dying_vma(struct i915_vma *vma)
    {
        return !kref_read(&vma->obj->base.refcount);
    }

    static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
    {
        …
        if (i915_gem_object_get_rcu(vma->obj)) {
            if (!i915_gem_object_trylock(vma->obj, ww)) {
                i915_gem_object_put(vma->obj);
                …
            }
            …
            atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
        }
        …
    }

    static void ungrab_vma(struct i915_vma *vma)
    {
        if (dying_vma(vma))
            …
        i915_gem_object_unlock(vma->obj);
    [all …]
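grab_vma() above follows the take-a-reference-then-trylock shape: acquire a reference first so the object cannot be freed underneath you, attempt the lock without sleeping, and undo the reference if the lock attempt fails. A hedged userspace sketch of that shape, assuming C11 atomics and pthreads (all names are illustrative, not i915 API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct demo_obj {
        atomic_int      refcount;
        pthread_mutex_t lock;
    };

    /* Take a reference only if the object is still live (refcount > 0),
     * mirroring the "get_rcu" step. */
    static bool obj_get(struct demo_obj *obj)
    {
        int old = atomic_load(&obj->refcount);
        while (old > 0)
            if (atomic_compare_exchange_weak(&obj->refcount, &old, old + 1))
                return true;
        return false;
    }

    static void obj_put(struct demo_obj *obj)
    {
        atomic_fetch_sub(&obj->refcount, 1);
    }

    /* The grab_vma() shape: reference first, trylock second, undo on failure. */
    static bool grab_object(struct demo_obj *obj)
    {
        if (!obj_get(obj))
            return false;
        if (pthread_mutex_trylock(&obj->lock) != 0) {
            obj_put(obj);       /* drop the reference we just took */
            return false;
        }
        return true;            /* caller holds a ref and the lock */
    }

The trylock matters because eviction already holds other locks; blocking here could deadlock, so failure is handled by backing off instead of waiting.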
/linux-6.14.4/tools/testing/vma/

vma.c:

    #define vma_iter_prealloc(vmi, vma) \
        (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))

    /* from alloc_and_link_vma() */
    struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);

    if (vma == NULL)
        …
    if (vma_link(mm, vma)) {
        vm_area_free(vma);
        …
    }
    …
    vma->vm_lock_seq = UINT_MAX;

    return vma;

    /* from vmg_set_range() */
    vmg->vma = NULL;

    /* from cleanup_mm() */
    struct vm_area_struct *vma;
    [all …]
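The vma_iter_prealloc() wrapper above is a small fault-injection hook: a global flag lets a test force the preallocation step to report -ENOMEM and so exercise the error-handling path deterministically. A minimal userspace sketch of the same idiom (demo_prealloc and insert_element are illustrative names):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool fail_prealloc;  /* set by a test to force the error path */

    /* 0 on success, -ENOMEM when the test has armed the failure knob */
    #define demo_prealloc(n) (fail_prealloc ? -ENOMEM : 0)

    static int insert_element(int n)
    {
        int ret = demo_prealloc(n);
        if (ret)
            return ret;     /* the error path under test */
        /* ... real insertion would go here ... */
        return 0;
    }

    int main(void)
    {
        fail_prealloc = true;   /* simulate allocation failure */
        printf("insert -> %d (expect %d)\n", insert_element(64), -ENOMEM);
        return 0;
    }

Because the knob sits in the macro rather than in the allocator, the test can flip it per call site without mocking the whole maple-tree API.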
vma_internal.h:

    #define vma_policy(vma) NULL

    /* hooks from struct vm_operations_struct */
    int (*mprotect)(struct vm_area_struct *vma, unsigned long start, …
    int (*access)(struct vm_area_struct *vma, unsigned long addr, …
    const char *(*name)(struct vm_area_struct *vma);
    int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
    struct mempolicy *(*get_policy)(struct vm_area_struct *vma, …
    struct page *(*find_special_page)(struct vm_area_struct *vma, …

    static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
    {
        return is_shared_maywrite(vma->vm_flags);
    }

    static inline bool vma_lock_alloc(struct vm_area_struct *vma)
    [all …]
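The function pointers listed above are the per-mapping hooks a driver fills in on its vm_operations_struct; any hook left NULL is simply skipped by the core mm. A hedged sketch of the usual wiring, using only the standard ->fault hook (demo_vm_fault and demo_mmap are hypothetical driver functions):

    #include <linux/mm.h>

    /* Hypothetical fault handler: called on first access to a mapped page. */
    static vm_fault_t demo_vm_fault(struct vm_fault *vmf)
    {
        /* A real handler would look up or allocate a page here. */
        return VM_FAULT_SIGBUS;
    }

    static const struct vm_operations_struct demo_vm_ops = {
        .fault = demo_vm_fault, /* unset hooks stay NULL */
    };

    /* In the driver's ->mmap file operation: */
    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
        vma->vm_ops = &demo_vm_ops;
        return 0;
    }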
/linux-6.14.4/mm/

vma.c:

    .vma = vma_, \

    /* from is_mergeable_vma() */
    struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;

    if (!mpol_equal(vmg->policy, vma_policy(vma)))
        …
    if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
        …
    if (vma->vm_file != vmg->file)
        …
    if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
        …
    if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
        …

    /* from is_mergeable_anon_vma() */
    … struct anon_vma *anon_vma2, struct vm_area_struct *vma)
    …
    if ((!anon_vma1 || !anon_vma2) && (!vma ||
        list_is_singular(&vma->anon_vma_chain)))
    [all …]
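is_mergeable_vma() above is a conjunction of cheap equality checks: two adjacent mappings may merge only if their NUMA policy, flags (ignoring VM_SOFTDIRTY), backing file, userfaultfd context, and anon name all match. A simplified sketch of that decision shape — struct demo_vma and its fields are stand-ins, not the kernel's types:

    #include <stdbool.h>

    struct demo_vma {
        unsigned long   flags;
        void            *file;      /* backing file, NULL if anonymous */
        void            *policy;    /* NUMA policy handle */
    };

    #define DEMO_SOFTDIRTY  (1UL << 3)  /* ignored when comparing flags */

    /* Merge-compatible only if every attribute matches; the first
     * mismatch vetoes the merge. */
    static bool demo_is_mergeable(const struct demo_vma *a,
                                  const struct demo_vma *b)
    {
        if ((a->flags ^ b->flags) & ~DEMO_SOFTDIRTY)
            return false;
        if (a->file != b->file)
            return false;
        if (a->policy != b->policy)
            return false;
        return true;
    }

The XOR-then-mask trick compares all flag bits at once while exempting the bits that are allowed to differ.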
mremap.c:

    static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, …

    static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, …
    {
        …
        pud = alloc_new_pud(mm, vma, addr);
        …
    }

    static void take_rmap_locks(struct vm_area_struct *vma)
    {
        if (vma->vm_file)
            i_mmap_lock_write(vma->vm_file->f_mapping);
        if (vma->anon_vma)
            anon_vma_lock_write(vma->anon_vma);
    }

    static void drop_rmap_locks(struct vm_area_struct *vma)
    {
        if (vma->anon_vma)
    [all …]
nommu.c:

    /* from kobjsize() */
    struct vm_area_struct *vma;

    vma = find_vma(current->mm, (unsigned long)objp);
    if (vma)
        return vma->vm_end - vma->vm_start;

    /* from __vmalloc_user_flags() */
    struct vm_area_struct *vma;

    vma = find_vma(current->mm, (unsigned long)ret);
    if (vma)
        vm_flags_set(vma, VM_USERMAP);

    int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, …

    int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, …
    [all …]
madvise.c:

    struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
    {
        mmap_assert_locked(vma->vm_mm);
        …
        return vma->anon_name;
    }

    static int replace_anon_vma_name(struct vm_area_struct *vma, …
    {
        struct anon_vma_name *orig_name = anon_vma_name(vma);
        …
        vma->anon_name = NULL;
        …
        vma->anon_name = anon_vma_name_reuse(anon_name);
        …
    }

    /* second, stub definition for the opposite config */
    static int replace_anon_vma_name(struct vm_area_struct *vma, …

    static int madvise_update_vma(struct vm_area_struct *vma, …
    {
        …
        struct mm_struct *mm = vma->vm_mm;
    [all …]
mprotect.c:

    bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr, …)
    {
        …
        if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
            …
        if (pte_needs_soft_dirty_wp(vma, pte))
            …
        if (userfaultfd_pte_wp(vma, pte))
            …
        if (!(vma->vm_flags & VM_SHARED)) {
            …
            page = vm_normal_page(vma, addr, pte);
            …
    }

    /* from change_pte_range() */
    … struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, …
    …
    pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    …
    if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
        atomic_read(&vma->vm_mm->mm_users) == 1)
    [all …]
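can_change_pte_writable() above is a veto chain: any one of several trackers (soft-dirty accounting, userfaultfd write-protection, a private page not exclusively owned) can block upgrading a PTE to writable, because each still needs to observe the write fault. A schematic sketch of the veto-chain shape, with hypothetical fields rather than real PTE bits:

    #include <stdbool.h>

    struct demo_pte {
        bool soft_dirty_wp;     /* soft-dirty tracking wants the fault */
        bool uffd_wp;           /* userfaultfd wants to see the write */
        bool anon_exclusive;    /* private page exclusively ours? */
    };

    /* Writable only if no tracker objects; the first veto wins. */
    static bool demo_can_make_writable(const struct demo_pte *pte, bool shared)
    {
        if (pte->soft_dirty_wp)
            return false;
        if (pte->uffd_wp)
            return false;
        if (!shared && !pte->anon_exclusive)
            return false;       /* COW would be needed first */
        return true;
    }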
mseal.c:

    static inline void set_vma_sealed(struct vm_area_struct *vma)
    {
        vm_flags_set(vma, VM_SEALED);
    }

    static bool is_ro_anon(struct vm_area_struct *vma)
    {
        …
        if (vma->vm_file || vma->vm_flags & VM_SHARED)
            …
        if (!(vma->vm_flags & VM_WRITE) ||
            !arch_vma_access_permitted(vma, true, false, false))
            …
    }

    bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
    {
        …
        if (unlikely(!can_modify_vma(vma) && is_ro_anon(vma)))
            …
    }

    static int mseal_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma, …
    {
        …
        vm_flags_t oldflags = vma->vm_flags;
    [all …]
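Userspace view of the sealing logic above: once mseal(2) sets VM_SEALED on a range, later attempts to change that mapping fail with EPERM. A hedged sketch — mseal appeared in Linux 6.10, glibc may not wrap it yet, and the raw syscall number used here (462 on x86-64) is an assumption to verify against your headers:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_mseal
    #define __NR_mseal 462      /* x86-64; check your kernel headers */
    #endif

    int main(void)
    {
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        if (syscall(__NR_mseal, p, len, 0UL)) { perror("mseal"); return 1; }

        /* VM_SEALED now vetoes this; expect EPERM. */
        if (mprotect(p, len, PROT_READ))
            perror("mprotect on sealed vma");
        return 0;
    }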
memory.c:

    /* from vmf_orig_pte_uffd_wp() */
    if (!userfaultfd_wp(vmf->vma))
        …

    /* from free_pgtables() */
    … struct vm_area_struct *vma, unsigned long floor, …
    {
        …
        unsigned long addr = vma->vm_start;
        …
        vma_start_write(vma);
        unlink_anon_vmas(vma);
        …
        if (is_vm_hugetlb_page(vma)) {
            unlink_file_vma(vma);
            hugetlb_free_pgd_range(tlb, addr, vma->vm_end, …
            …
        }
        …
        unlink_file_vma_batch_add(&vb, vma);
        …
        while (next && next->vm_start <= vma->vm_end + PMD_SIZE …
    [all …]
vma.h:

    /* struct members, each from a different context */
    struct vm_area_struct *vma;
    struct vm_area_struct *vma;    /* The first vma to munmap */
    struct vm_area_struct *vma;    /* Either a new VMA or the one being modified. */

    static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, …)
    {
        return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
    }

    .vma = vma_, \

    /* from vma_iter_store_gfp() */
    … struct vm_area_struct *vma, gfp_t gfp)
    {
        …
        ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
        …
        __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
    [all …]
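vma_pgoff_offset() above converts a virtual address inside a mapping into a page offset within the backing file: take the VMA's starting file offset (vm_pgoff, in pages) and add how many pages the address sits past vm_start. A worked example of that arithmetic in plain userspace C (PAGE_SHIFT and the sample numbers are assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages assumed */

    int main(void)
    {
        unsigned long vm_start = 0x7f0000000000UL;
        unsigned long vm_pgoff = 0x100;             /* file offset, in pages */
        unsigned long addr     = 0x7f0000003000UL;  /* 3 pages into the VMA */

        /* PHYS_PFN(x) is (x) >> PAGE_SHIFT in the kernel */
        unsigned long pgoff = vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT);

        printf("pgoff = 0x%lx\n", pgoff);   /* prints 0x103 */
        return 0;
    }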
mmap.c:

    void vma_set_page_prot(struct vm_area_struct *vma)
    {
        unsigned long vm_flags = vma->vm_flags;
        …
        vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
        if (vma_wants_writenotify(vma, vm_page_prot)) {
            …
        }
        WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
    }

    /* from generic_get_unmapped_area() */
    struct vm_area_struct *vma, *prev;
    …
    vma = find_vma_prev(mm, addr, &prev);
    … (!vma || addr + len <= vm_start_gap(vma)) && …

    /* from generic_get_unmapped_area_topdown() */
    struct vm_area_struct *vma, *prev;
    …
    vma = find_vma_prev(mm, addr, &prev);
    [all …]
rmap.c:

    static void anon_vma_chain_link(struct vm_area_struct *vma, …
    {
        …
        avc->vma = vma;
        …
        list_add(&avc->same_vma, &vma->anon_vma_chain);
    }

    int __anon_vma_prepare(struct vm_area_struct *vma)
    {
        struct mm_struct *mm = vma->vm_mm;
        …
        anon_vma = find_mergeable_anon_vma(vma);
        …
        if (likely(!vma->anon_vma)) {
            vma->anon_vma = anon_vma;
            anon_vma_chain_link(vma, avc, anon_vma);
        }
        …
    }

    int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
    [all …]
/linux-6.14.4/include/linux/

userfaultfd_k.h:

    extern long uffd_wp_range(struct vm_area_struct *vma, …

    static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, …)
    {
        return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
    }

    static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
    {
        return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
    }

    static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
    {
        return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
    }

    static inline bool userfaultfd_missing(struct vm_area_struct *vma)
    {
        return vma->vm_flags & VM_UFFD_MISSING;
    }

    static inline bool userfaultfd_wp(struct vm_area_struct *vma)
    [all …]
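The VM_UFFD_* flags these predicates test are set when userspace registers a range with a userfaultfd. A hedged userspace sketch of that registration (error handling abbreviated; on recent kernels this may require privilege or vm.unprivileged_userfaultfd=1):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (uffd < 0) { perror("userfaultfd"); return 1; }

        struct uffdio_api api = { .api = UFFD_API };
        if (ioctl(uffd, UFFDIO_API, &api)) { perror("UFFDIO_API"); return 1; }

        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        /* Registration sets VM_UFFD_MISSING on the VMA — exactly what
         * userfaultfd_missing() above tests. */
        struct uffdio_register reg = {
            .range = { .start = (unsigned long)p, .len = len },
            .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(uffd, UFFDIO_REGISTER, &reg)) {
            perror("UFFDIO_REGISTER");
            return 1;
        }

        puts("registered; missing faults on this range now go to uffd");
        close(uffd);
        return 0;
    }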
/linux-6.14.4/drivers/gpu/drm/nouveau/

nouveau_vmm.c:

    nouveau_vma_unmap(struct nouveau_vma *vma)
    {
        if (vma->mem) {
            nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
            vma->mem = NULL;
        }
    }

    nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
    {
        struct nvif_vma tmp = { .addr = vma->addr };
        int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
        …
        vma->mem = mem;
        …
    }

    /* from nouveau_vma_find() */
    struct nouveau_vma *vma;

    list_for_each_entry(vma, &nvbo->vma_list, head) {
    [all …]
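nouveau_vma_find() above is the standard kernel linked-list lookup: walk an intrusive list with list_for_each_entry() and return the first element whose field matches. A minimal kernel-style sketch of that pattern (struct demo_vma and demo_vma_find are hypothetical):

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_vma {
        struct list_head head;  /* links this vma into its owner's list */
        u64 addr;
    };

    /* Return the vma covering addr, or NULL if none is on the list. */
    static struct demo_vma *demo_vma_find(struct list_head *vma_list, u64 addr)
    {
        struct demo_vma *vma;

        list_for_each_entry(vma, vma_list, head)
            if (vma->addr == addr)
                return vma;
        return NULL;
    }

The list_head is embedded in the element itself, so the walk needs no per-node allocation; list_for_each_entry() recovers the containing struct from the embedded member.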
/linux-6.14.4/drivers/gpu/drm/i915/display/

intel_fb_pin.c:

    /* from intel_fb_pin_to_dpt() */
    struct i915_vma *vma;
    …
    vma = i915_vma_instance(obj, vm, view);
    if (IS_ERR(vma)) {
        ret = PTR_ERR(vma);
        …
    }
    if (i915_vma_misplaced(vma, 0, alignment, 0)) {
        ret = i915_vma_unbind(vma);
        …
    }
    ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
    …
    vma = ERR_PTR(ret);
    …
    vma->display_alignment = max(vma->display_alignment, alignment);
    …
    i915_vma_get(vma);
    [all …]
/linux-6.14.4/drivers/gpu/drm/msm/

msm_gem_vma.c:

    void msm_gem_vma_purge(struct msm_gem_vma *vma)
    {
        struct msm_gem_address_space *aspace = vma->aspace;
        unsigned size = vma->node.size;
        …
        if (!vma->mapped)
            …
        aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
        …
        vma->mapped = false;
    }

    msm_gem_vma_map(struct msm_gem_vma *vma, int prot, …)
    {
        struct msm_gem_address_space *aspace = vma->aspace;
        …
        if (GEM_WARN_ON(!vma->iova))
            …
        if (vma->mapped)
    [all …]
/linux-6.14.4/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

vmm.c:

    /* from nvkm_vma_new() */
    struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
    if (vma) {
        vma->addr = addr;
        vma->size = size;
        vma->page = NVKM_VMA_PAGE_NONE;
        vma->refd = NVKM_VMA_PAGE_NONE;
    }
    return vma;

    nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
    {
        …
        BUG_ON(vma->size == tail);

        if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
    [all …]
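nvkm_vma_tail() above carves the last `tail` bytes of a range into a freshly allocated node while the original node shrinks to keep the head. A userspace sketch of that split arithmetic, with simplified types and no list fixup (demo_node and demo_tail_split are stand-ins):

    #include <stdint.h>
    #include <stdlib.h>

    struct demo_node {
        uint64_t addr;
        uint64_t size;
    };

    /* Split off the last `tail` bytes of n into a new node.
     * Caller must guarantee 0 < tail < n->size. */
    static struct demo_node *demo_tail_split(struct demo_node *n, uint64_t tail)
    {
        struct demo_node *new = malloc(sizeof(*new));

        if (!new)
            return NULL;
        new->addr = n->addr + (n->size - tail);  /* tail starts here */
        new->size = tail;
        n->size -= tail;    /* original keeps the head of the range */
        return new;
    }

The two nodes stay adjacent by construction: n covers [addr, addr + size) and new covers [n->addr + n->size, old end), so no address is lost or duplicated by the split.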
/linux-6.14.4/drivers/gpu/drm/i915/selftests/

i915_gem_gtt.c:

    /* from close_object_list() */
    struct i915_vma *vma;

    vma = i915_vma_instance(obj, vm, NULL);
    if (!IS_ERR(vma))
        ignored = i915_vma_unbind_unlocked(vma);

    /* from fill_hole() */
    struct i915_vma *vma;
    …
    vma = i915_vma_instance(obj, vm, NULL);
    if (IS_ERR(vma))
        …
    err = i915_vma_pin(vma, 0, 0, offset | flags);
    …
    if (!drm_mm_node_allocated(&vma->node) ||
        i915_vma_misplaced(vma, 0, 0, offset | flags)) {
    [all …]
i915_vma.c:

    static bool assert_vma(struct i915_vma *vma, …
    {
        …
        if (vma->vm != ctx->vm) {
            …
        }
        if (vma->size != obj->base.size) {
            … vma->size, obj->base.size);
            …
        }
        if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {
            … vma->gtt_view.type);
            …
        }
        …
    }

    /* from checked_vma_instance() */
    struct i915_vma *vma;
    …
    vma = i915_vma_instance(obj, vm, view);
    if (IS_ERR(vma))
        return vma;
    [all …]
/linux-6.14.4/drivers/gpu/drm/xe/

xe_trace_bo.h:

    #define __dev_name_vma(vma) __dev_name_vm(xe_vma_vm(vma))

    /* from a trace event class for xe_vma events */
    TP_PROTO(struct xe_vma *vma),
    TP_ARGS(vma),
    …
    __string(dev, __dev_name_vma(vma))
    __field(struct xe_vma *, vma)
    …
    __entry->vma = vma;
    __entry->asid = xe_vma_vm(vma)->usm.asid;
    __entry->start = xe_vma_start(vma);
    __entry->end = xe_vma_end(vma) - 1;
    __entry->ptr = xe_vma_userptr(vma);
    [all …]
xe_vm.h:

    static inline u64 xe_vma_start(struct xe_vma *vma)
    {
        return vma->gpuva.va.addr;
    }

    static inline u64 xe_vma_size(struct xe_vma *vma)
    {
        return vma->gpuva.va.range;
    }

    static inline u64 xe_vma_end(struct xe_vma *vma)
    {
        return xe_vma_start(vma) + xe_vma_size(vma);
    }

    static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
    {
        return vma->gpuva.gem.offset;
    }

    static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
    {
        return !vma->gpuva.gem.obj ? NULL : …
    [all …]
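These accessors define a half-open range: xe_vma_end() is one past the last byte, which is why the tracepoint in xe_trace_bo.h above records xe_vma_end(vma) - 1 as the inclusive end. A tiny demonstration of the convention (sample numbers assumed):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t start = 0x100000, size = 0x4000;
        uint64_t end = start + size;    /* one past the last byte */

        printf("range [%#" PRIx64 ", %#" PRIx64 "), last byte %#" PRIx64 "\n",
               start, end, end - 1);
        return 0;
    }

Half-open ranges compose cleanly: two ranges are adjacent exactly when one's end equals the other's start, with no off-by-one adjustments.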
/linux-6.14.4/drivers/pci/

mmap.c:

    /* from pci_mmap_resource_range() */
    … struct vm_area_struct *vma, …
    {
        …
        if (vma->vm_pgoff + vma_pages(vma) > size)
            …
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        …
        vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
        …
        ret = pci_iobar_pfn(pdev, bar, vma);
        …
        vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);

        vma->vm_ops = &pci_phys_vm_ops;

        return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot);
    [all …]
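The userspace counterpart of pci_mmap_resource_range() is mapping a BAR through the sysfs resource file; the kernel side above is what validates vm_pgoff against the BAR size and installs the mapping with io_remap_pfn_range(). A hedged sketch — the device path and 4 KiB length are examples, and the BAR must actually support mmap:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        /* Example device; substitute a real BDF from lspci. */
        int fd = open("/sys/bus/pci/devices/0000:01:00.0/resource0", O_RDWR);
        if (fd < 0) { perror("open"); return 1; }

        void *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, 0);
        if (bar == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

        /* MMIO registers are now readable/writable through *bar. */
        munmap(bar, 4096);
        close(fd);
        return 0;
    }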