Lines matching full:mem in drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c

78 		struct kgd_mem *mem)  in kfd_mem_is_attached()  argument
82 list_for_each_entry(entry, &mem->attachments, list) in kfd_mem_is_attached()
115 uint64_t mem; in amdgpu_amdkfd_gpuvm_init_mem_limits() local
121 mem = si.totalram - si.totalhigh; in amdgpu_amdkfd_gpuvm_init_mem_limits()
122 mem *= si.mem_unit; in amdgpu_amdkfd_gpuvm_init_mem_limits()
125 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6); in amdgpu_amdkfd_gpuvm_init_mem_limits()
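As a quick illustration of the arithmetic in amdgpu_amdkfd_gpuvm_init_mem_limits() above: the system memory limit ends up as 63/64 of total low RAM, because mem >> 6 subtracts one 64th. A minimal userspace sketch with made-up sysinfo-style values (not the kernel structures):

/* Sketch only: mirrors the limit computation in
 * amdgpu_amdkfd_gpuvm_init_mem_limits() with hypothetical values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t totalram  = 16UL << 20;  /* hypothetical page count */
	uint64_t totalhigh = 0;           /* highmem pages (0 on 64-bit) */
	uint64_t mem_unit  = 4096;        /* bytes per page */

	uint64_t mem = (totalram - totalhigh) * mem_unit;
	/* max_system_mem_limit = mem - mem/64, i.e. 63/64 of low RAM */
	uint64_t max_system_mem_limit = mem - (mem >> 6);

	printf("low RAM %llu bytes, system mem limit %llu bytes\n",
	       (unsigned long long)mem,
	       (unsigned long long)max_system_mem_limit);
	return 0;
}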
259 "adev reference can't be null when alloc mem flags vram is set"); in amdgpu_amdkfd_unreserve_mem_limit()
310 * @mem: BO of peer device that is being DMA mapped. Provides parameters
316 struct kgd_mem *mem, struct amdgpu_bo **bo_out) in create_dmamap_sg_bo() argument
322 ret = amdgpu_bo_reserve(mem->bo, false); in create_dmamap_sg_bo()
326 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) in create_dmamap_sg_bo()
327 flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT | in create_dmamap_sg_bo()
330 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1, in create_dmamap_sg_bo()
332 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0); in create_dmamap_sg_bo()
334 amdgpu_bo_unreserve(mem->bo); in create_dmamap_sg_bo()
342 (*bo_out)->parent = amdgpu_bo_ref(mem->bo); in create_dmamap_sg_bo()
505 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) in get_pte_flags() argument
510 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) in get_pte_flags()
512 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) in get_pte_flags()
551 kfd_mem_dmamap_userptr(struct kgd_mem *mem, in kfd_mem_dmamap_userptr() argument
555 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmamap_userptr()
560 struct ttm_tt *src_ttm = mem->bo->tbo.ttm; in kfd_mem_dmamap_userptr()
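The WRITABLE alloc flag decides the DMA direction in kfd_mem_dmamap_userptr() and in the SG-BO map/unmap helpers below: writable allocations map bidirectionally, read-only ones map to-device only. A standalone sketch of that selection; the flag bit and enum values here are placeholders, not the kernel definitions:

/* Sketch of the direction selection; enum values stand in for the
 * kernel's enum dma_data_direction. */
#include <stdio.h>

enum dma_dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE };

#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1u << 31)  /* placeholder bit */

static enum dma_dir kfd_dma_dir(unsigned int alloc_flags)
{
	/* Writable allocations may be written by the GPU, so the mapping
	 * must cover both directions; read-only ones only stream to the
	 * device. */
	return (alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
}

int main(void)
{
	printf("%d %d\n", kfd_dma_dir(KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE),
	       kfd_dma_dir(0));
	return 0;
}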
619 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
644 kfd_mem_dmamap_sg_bo(struct kgd_mem *mem, in kfd_mem_dmamap_sg_bo() argument
657 mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP); in kfd_mem_dmamap_sg_bo()
663 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmamap_sg_bo()
665 dma_addr = mem->bo->tbo.sg->sgl->dma_address; in kfd_mem_dmamap_sg_bo()
666 pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length); in kfd_mem_dmamap_sg_bo()
669 mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); in kfd_mem_dmamap_sg_bo()
675 ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length); in kfd_mem_dmamap_sg_bo()
693 dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length, in kfd_mem_dmamap_sg_bo()
699 kfd_mem_dmamap_attachment(struct kgd_mem *mem, in kfd_mem_dmamap_attachment() argument
706 return kfd_mem_dmamap_userptr(mem, attachment); in kfd_mem_dmamap_attachment()
710 return kfd_mem_dmamap_sg_bo(mem, attachment); in kfd_mem_dmamap_attachment()
718 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, in kfd_mem_dmaunmap_userptr() argument
722 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmaunmap_userptr()
752 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
767 kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem, in kfd_mem_dmaunmap_sg_bo() argument
784 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmaunmap_sg_bo()
795 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, in kfd_mem_dmaunmap_attachment() argument
802 kfd_mem_dmaunmap_userptr(mem, attachment); in kfd_mem_dmaunmap_attachment()
808 kfd_mem_dmaunmap_sg_bo(mem, attachment); in kfd_mem_dmaunmap_attachment()
815 static int kfd_mem_export_dmabuf(struct kgd_mem *mem) in kfd_mem_export_dmabuf() argument
817 if (!mem->dmabuf) { in kfd_mem_export_dmabuf()
821 bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); in kfd_mem_export_dmabuf()
823 mem->gem_handle, in kfd_mem_export_dmabuf()
824 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_export_dmabuf()
828 mem->dmabuf = dmabuf; in kfd_mem_export_dmabuf()
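kfd_mem_export_dmabuf() creates the dma-buf only on first use and caches it in mem->dmabuf; amdgpu_amdkfd_gpuvm_export_dmabuf() further down takes an extra reference on the cached buffer before returning it. A rough standalone sketch of that create-once-and-cache pattern, using invented types rather than the kernel API:

/* Sketch of the lazy export-and-cache pattern; types and helpers are
 * illustrative stand-ins, not the dma-buf API. */
#include <stdlib.h>
#include <stdio.h>

struct fake_dmabuf { int refcount; };

struct fake_mem {
	struct fake_dmabuf *dmabuf;   /* created on first export, then reused */
};

static struct fake_dmabuf *export_once(struct fake_mem *mem)
{
	if (!mem->dmabuf) {
		mem->dmabuf = calloc(1, sizeof(*mem->dmabuf));
		if (!mem->dmabuf)
			return NULL;
		mem->dmabuf->refcount = 1;   /* reference held by mem */
	}
	return mem->dmabuf;
}

static struct fake_dmabuf *export_to_caller(struct fake_mem *mem)
{
	struct fake_dmabuf *buf = export_once(mem);

	if (buf)
		buf->refcount++;   /* caller gets its own reference, as with get_dma_buf() */
	return buf;
}

int main(void)
{
	struct fake_mem mem = { 0 };
	struct fake_dmabuf *a = export_to_caller(&mem);
	struct fake_dmabuf *b = export_to_caller(&mem);

	printf("same object: %d, refcount: %d\n", a == b, a ? a->refcount : 0);
	free(mem.dmabuf);
	return 0;
}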
835 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, in kfd_mem_attach_dmabuf() argument
841 ret = kfd_mem_export_dmabuf(mem); in kfd_mem_attach_dmabuf()
845 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); in kfd_mem_attach_dmabuf()
868 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, in kfd_mem_attach() argument
871 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); in kfd_mem_attach()
872 unsigned long bo_size = mem->bo->tbo.base.size; in kfd_mem_attach()
873 uint64_t va = mem->va; in kfd_mem_attach()
894 ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) || in kfd_mem_attach()
895 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) || in kfd_mem_attach()
896 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { in kfd_mem_attach()
897 if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM) in kfd_mem_attach()
913 if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) || in kfd_mem_attach()
914 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) || in kfd_mem_attach()
915 (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) || in kfd_mem_attach()
922 bo[i] = mem->bo; in kfd_mem_attach()
929 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { in kfd_mem_attach()
932 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); in kfd_mem_attach()
936 } else if (mem->bo->tbo.type == ttm_bo_type_sg) { in kfd_mem_attach()
937 WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL || in kfd_mem_attach()
938 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP), in kfd_mem_attach()
941 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); in kfd_mem_attach()
945 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT || in kfd_mem_attach()
946 mem->domain == AMDGPU_GEM_DOMAIN_VRAM) { in kfd_mem_attach()
948 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]); in kfd_mem_attach()
978 attachment[i]->pte_flags = get_pte_flags(adev, mem); in kfd_mem_attach()
980 list_add(&attachment[i]->list, &mem->attachments); in kfd_mem_attach()
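kfd_mem_attach() picks one of three attachment strategies: reuse mem->bo directly when the mapping GPU owns the BO or can reuse its DMA mapping, wrap userptr and doorbell/MMIO SG BOs in a fresh DMA-map SG BO via create_dmamap_sg_bo(), or import GTT/VRAM BOs from a peer GPU through kfd_mem_attach_dmabuf(). A simplified sketch of that selection; the conditions are condensed and the enum is invented for illustration:

/* Rough sketch of the attachment-type selection in kfd_mem_attach(). */
#include <stdbool.h>
#include <stdio.h>

enum attach_type { ATTACH_SHARED_BO, ATTACH_DMAMAP_SG_BO, ATTACH_DMABUF_IMPORT };

static enum attach_type pick_attachment(bool same_gpu_or_reusable,
					bool is_userptr,
					bool is_sg_bo /* doorbell or MMIO */)
{
	if (same_gpu_or_reusable)
		return ATTACH_SHARED_BO;      /* reuse mem->bo directly */
	if (is_userptr || is_sg_bo)
		return ATTACH_DMAMAP_SG_BO;   /* create_dmamap_sg_bo() wrapper */
	return ATTACH_DMABUF_IMPORT;          /* kfd_mem_attach_dmabuf() */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_attachment(true, false, false),
	       pick_attachment(false, true, false),
	       pick_attachment(false, false, false));
	return 0;
}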
1018 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, in add_kgd_mem_to_kfd_bo_list() argument
1024 list_add_tail(&mem->validate_list, in add_kgd_mem_to_kfd_bo_list()
1027 list_add_tail(&mem->validate_list, &process_info->kfd_bo_list); in add_kgd_mem_to_kfd_bo_list()
1031 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, in remove_kgd_mem_from_kfd_bo_list() argument
1035 list_del(&mem->validate_list); in remove_kgd_mem_from_kfd_bo_list()
1051 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, in init_user_pages() argument
1054 struct amdkfd_process_info *process_info = mem->process_info; in init_user_pages()
1055 struct amdgpu_bo *bo = mem->bo; in init_user_pages()
1083 mem->invalid++; in init_user_pages()
1103 amdgpu_bo_placement_from_domain(bo, mem->domain); in init_user_pages()
1141 * @mem: KFD BO structure.
1145 static int reserve_bo_and_vm(struct kgd_mem *mem, in reserve_bo_and_vm() argument
1149 struct amdgpu_bo *bo = mem->bo; in reserve_bo_and_vm()
1155 ctx->sync = &mem->sync; in reserve_bo_and_vm()
1178 * @mem: KFD BO structure.
1186 static int reserve_bo_and_cond_vms(struct kgd_mem *mem, in reserve_bo_and_cond_vms() argument
1191 struct amdgpu_bo *bo = mem->bo; in reserve_bo_and_cond_vms()
1194 ctx->sync = &mem->sync; in reserve_bo_and_cond_vms()
1199 list_for_each_entry(entry, &mem->attachments, list) { in reserve_bo_and_cond_vms()
1249 static int unmap_bo_from_gpuvm(struct kgd_mem *mem, in unmap_bo_from_gpuvm() argument
1271 static int update_gpuvm_pte(struct kgd_mem *mem, in update_gpuvm_pte() argument
1279 ret = kfd_mem_dmamap_attachment(mem, entry); in update_gpuvm_pte()
1293 static int map_bo_to_gpuvm(struct kgd_mem *mem, in map_bo_to_gpuvm() argument
1313 ret = update_gpuvm_pte(mem, entry, sync); in map_bo_to_gpuvm()
1322 unmap_bo_from_gpuvm(mem, entry, sync); in map_bo_to_gpuvm()
1323 kfd_mem_dmaunmap_attachment(mem, entry); in map_bo_to_gpuvm()
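update_gpuvm_pte() first DMA-maps the attachment and then updates the page tables; the error path in map_bo_to_gpuvm() unwinds in reverse, unmapping from the GPUVM before DMA-unmapping. A toy sketch of that ordering with stand-in functions (the failure is simulated):

/* Sketch of the map/unwind ordering in map_bo_to_gpuvm(). */
#include <stdio.h>

static int dma_map_attachment(void)    { return 0; }
static void dma_unmap_attachment(void) { }
static int write_ptes(void)            { return -1; /* simulate failure */ }
static void clear_ptes(void)           { }

static int map_attachment(void)
{
	int ret = dma_map_attachment();
	if (ret)
		return ret;

	ret = write_ptes();
	if (ret) {
		clear_ptes();            /* unmap_bo_from_gpuvm() analogue */
		dma_unmap_attachment();  /* kfd_mem_dmaunmap_attachment() analogue */
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("map_attachment -> %d\n", map_attachment());
	return 0;
}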
1717 void *drm_priv, struct kgd_mem **mem, in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() argument
1786 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1787 if (!*mem) { in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1791 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1792 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1793 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1799 if ((*mem)->aql_queue) in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1803 (*mem)->alloc_flags = flags; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1805 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1815 va, (*mem)->aql_queue ? size << 1 : size, in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1830 ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1838 bo->kfd_bo = *mem; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1839 (*mem)->bo = bo; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1843 (*mem)->va = va; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1844 (*mem)->domain = domain; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1845 (*mem)->mapped_to_gpu_memory = 0; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1846 (*mem)->process_info = avm->process_info; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1848 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1852 ret = init_user_pages(*mem, user_addr, criu_resume); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1883 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1884 drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1888 /* Don't unreserve system mem limit twice */ in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1893 amdgpu_sync_free(&(*mem)->sync); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1894 mutex_destroy(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1898 kfree(*mem); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
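For AQL queue memory, amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() reserves twice the buffer size of VA (size << 1), and the VA-range prints elsewhere use bo_size * (1 + mem->aql_queue) for the same doubling, since AQL queue buffers get a second mapping. A plain arithmetic sketch, no kernel types:

/* Sketch of the AQL-queue VA-size doubling. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t reserved_va_size(uint64_t size, bool aql_queue)
{
	/* AQL queues are mapped a second time, so reserve 2x the VA. */
	return aql_queue ? size << 1 : size;
}

int main(void)
{
	uint64_t size = 1ULL << 20;   /* hypothetical 1 MiB buffer */

	printf("normal: %llu, AQL queue: %llu\n",
	       (unsigned long long)reserved_va_size(size, false),
	       (unsigned long long)reserved_va_size(size, true));
	return 0;
}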
1908 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu() argument
1911 struct amdkfd_process_info *process_info = mem->process_info; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1912 unsigned long bo_size = mem->bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1913 bool use_release_notifier = (mem->bo->kfd_bo == mem); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1920 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1923 if (mem->alloc_flags & in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1926 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1929 mapped_to_gpu_memory = mem->mapped_to_gpu_memory; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1930 is_imported = mem->is_imported; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1931 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1932 /* lock is not needed after this, since mem is unused and will in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1938 mem->va, bo_size); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1944 list_del(&mem->validate_list); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1948 if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1949 amdgpu_hmm_unregister(mem->bo); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1951 amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1955 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1959 amdgpu_amdkfd_remove_eviction_fence(mem->bo, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1961 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1962 mem->va + bo_size * (1 + mem->aql_queue)); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1965 list_for_each_entry_safe(entry, tmp, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1966 kfd_mem_dmaunmap_attachment(mem, entry); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1973 amdgpu_sync_free(&mem->sync); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1978 if (mem->bo->tbo.sg) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1979 sg_free_table(mem->bo->tbo.sg); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1980 kfree(mem->bo->tbo.sg); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1989 (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1991 mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1998 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1999 drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2000 if (mem->dmabuf) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2001 dma_buf_put(mem->dmabuf); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2002 mem->dmabuf = NULL; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2004 mutex_destroy(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2007 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2010 drm_gem_object_put(&mem->bo->tbo.base); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2017 kfree(mem); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2023 struct amdgpu_device *adev, struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu() argument
2035 bo = mem->bo; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2045 mutex_lock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2052 mutex_lock(&mem->process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2053 is_invalid_userptr = !!mem->invalid; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2054 mutex_unlock(&mem->process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2057 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2059 domain = mem->domain; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2063 mem->va, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2064 mem->va + bo_size * (1 + mem->aql_queue), in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2067 if (!kfd_mem_is_attached(avm, mem)) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2068 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2073 ret = reserve_bo_and_vm(mem, avm, &ctx); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2090 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2097 ret = map_bo_to_gpuvm(mem, entry, ctx.sync, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2111 mem->mapped_to_gpu_memory++; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2113 mem->mapped_to_gpu_memory); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2123 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2124 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2128 int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv) in amdgpu_amdkfd_gpuvm_dmaunmap_mem() argument
2136 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2138 ret = amdgpu_bo_reserve(mem->bo, true); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2142 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2149 kfd_mem_dmaunmap_attachment(mem, entry); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2152 amdgpu_bo_unreserve(mem->bo); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2154 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2160 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu() argument
2163 unsigned long bo_size = mem->bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2168 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2170 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2184 mem->va, in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2185 mem->va + bo_size * (1 + mem->aql_queue), in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2188 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2195 ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2201 mem->mapped_to_gpu_memory--; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2203 mem->mapped_to_gpu_memory); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2209 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2214 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) in amdgpu_amdkfd_gpuvm_sync_memory() argument
2221 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_sync_memory()
2222 amdgpu_sync_clone(&mem->sync, &sync); in amdgpu_amdkfd_gpuvm_sync_memory()
2223 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_sync_memory()
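amdgpu_amdkfd_gpuvm_sync_memory() clones the BO's sync object while holding mem->lock; the wait itself (not among these matches) then runs on the private copy with the lock dropped, so it does not block other operations on the BO. A minimal pthread sketch of that snapshot-under-lock pattern, with the sync object reduced to a plain value:

/* Sketch of "snapshot under the lock, wait outside it". */
#include <pthread.h>
#include <stdio.h>

struct mem_obj {
	pthread_mutex_t lock;
	int sync;                 /* stands in for struct amdgpu_sync */
};

static void wait_on(int sync_snapshot)
{
	/* Waiting happens on the private copy, with mem->lock dropped. */
	printf("waiting on snapshot %d\n", sync_snapshot);
}

static void sync_memory(struct mem_obj *mem)
{
	int snapshot;

	pthread_mutex_lock(&mem->lock);
	snapshot = mem->sync;      /* amdgpu_sync_clone() analogue */
	pthread_mutex_unlock(&mem->lock);

	wait_on(snapshot);
}

int main(void)
{
	struct mem_obj mem = { PTHREAD_MUTEX_INITIALIZER, 42 };

	sync_memory(&mem);
	return 0;
}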
2280 * @mem: Buffer object to be mapped for CPU access
2291 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() argument
2295 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2302 mutex_lock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2323 bo, mem->process_info->eviction_fence); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2330 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2338 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2345 * @mem: Buffer object to be unmapped for CPU access
2351 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem) in amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() argument
2353 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel()
2362 struct kfd_vm_fault_info *mem) in amdgpu_amdkfd_gpuvm_get_vm_fault_info() argument
2365 *mem = *adev->gmc.vm_fault_info; in amdgpu_amdkfd_gpuvm_get_vm_fault_info()
2376 struct kgd_mem **mem, uint64_t *size, in import_obj_create() argument
2389 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in import_obj_create()
2390 if (!*mem) in import_obj_create()
2403 INIT_LIST_HEAD(&(*mem)->attachments); in import_obj_create()
2404 mutex_init(&(*mem)->lock); in import_obj_create()
2406 (*mem)->alloc_flags = in import_obj_create()
2413 (*mem)->dmabuf = dma_buf; in import_obj_create()
2414 (*mem)->bo = bo; in import_obj_create()
2415 (*mem)->va = va; in import_obj_create()
2416 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && in import_obj_create()
2420 (*mem)->mapped_to_gpu_memory = 0; in import_obj_create()
2421 (*mem)->process_info = avm->process_info; in import_obj_create()
2422 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); in import_obj_create()
2423 amdgpu_sync_create(&(*mem)->sync); in import_obj_create()
2424 (*mem)->is_imported = true; in import_obj_create()
2429 ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain, in import_obj_create()
2438 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); in import_obj_create()
2441 kfree(*mem); in import_obj_create()
2447 struct kgd_mem **mem, uint64_t *size, in amdgpu_amdkfd_gpuvm_import_dmabuf_fd() argument
2464 ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size, in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2469 (*mem)->gem_handle = handle; in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2480 int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_export_dmabuf() argument
2485 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2486 ret = kfd_mem_export_dmabuf(mem); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2490 get_dma_buf(mem->dmabuf); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2491 *dma_buf = mem->dmabuf; in amdgpu_amdkfd_gpuvm_export_dmabuf()
2493 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2508 unsigned long cur_seq, struct kgd_mem *mem) in amdgpu_amdkfd_evict_userptr() argument
2510 struct amdkfd_process_info *process_info = mem->process_info; in amdgpu_amdkfd_evict_userptr()
2522 mem->invalid++; in amdgpu_amdkfd_evict_userptr()
2550 struct kgd_mem *mem, *tmp_mem; in update_invalid_user_pages() local
2559 list_for_each_entry_safe(mem, tmp_mem, in update_invalid_user_pages()
2562 if (mem->invalid) in update_invalid_user_pages()
2563 list_move_tail(&mem->validate_list, in update_invalid_user_pages()
2567 list_for_each_entry(mem, &process_info->userptr_inval_list, in update_invalid_user_pages()
2569 invalid = mem->invalid; in update_invalid_user_pages()
2576 bo = mem->bo; in update_invalid_user_pages()
2578 amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range); in update_invalid_user_pages()
2579 mem->range = NULL; in update_invalid_user_pages()
2604 &mem->range); in update_invalid_user_pages()
2626 if (mem->invalid != invalid) { in update_invalid_user_pages()
2630 /* set mem valid if mem has hmm range associated */ in update_invalid_user_pages()
2631 if (mem->range) in update_invalid_user_pages()
2632 mem->invalid = 0; in update_invalid_user_pages()
2653 struct kgd_mem *mem, *tmp_mem; in validate_invalid_user_pages() local
2672 list_for_each_entry(mem, &process_info->userptr_inval_list, in validate_invalid_user_pages()
2676 gobj = &mem->bo->tbo.base; in validate_invalid_user_pages()
2689 list_for_each_entry_safe(mem, tmp_mem, in validate_invalid_user_pages()
2694 bo = mem->bo; in validate_invalid_user_pages()
2698 amdgpu_bo_placement_from_domain(bo, mem->domain); in validate_invalid_user_pages()
2712 list_for_each_entry(attachment, &mem->attachments, list) { in validate_invalid_user_pages()
2716 kfd_mem_dmaunmap_attachment(mem, attachment); in validate_invalid_user_pages()
2717 ret = update_gpuvm_pte(mem, attachment, &sync); in validate_invalid_user_pages()
2722 mem->invalid++; in validate_invalid_user_pages()
2746 struct kgd_mem *mem, *tmp_mem; in confirm_valid_user_pages_locked() local
2749 list_for_each_entry_safe(mem, tmp_mem, in confirm_valid_user_pages_locked()
2754 /* keep mem without hmm range at userptr_inval_list */ in confirm_valid_user_pages_locked()
2755 if (!mem->range) in confirm_valid_user_pages_locked()
2758 /* Only check mem with hmm range associated */ in confirm_valid_user_pages_locked()
2760 mem->bo->tbo.ttm, mem->range); in confirm_valid_user_pages_locked()
2762 mem->range = NULL; in confirm_valid_user_pages_locked()
2764 WARN(!mem->invalid, "Invalid BO not marked invalid"); in confirm_valid_user_pages_locked()
2769 if (mem->invalid) { in confirm_valid_user_pages_locked()
2775 list_move_tail(&mem->validate_list, in confirm_valid_user_pages_locked()
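The userptr paths above use mem->invalid as an invalidation counter: amdgpu_amdkfd_evict_userptr() bumps it, update_invalid_user_pages() snapshots it before re-fetching pages and only treats the BO as valid again if no further invalidation raced in. A simplified standalone sketch of that check, with locking omitted and the page fetch stubbed out:

/* Sketch of the invalidation-counter race check around
 * update_invalid_user_pages(). */
#include <stdbool.h>
#include <stdio.h>

struct userptr_mem {
	unsigned int invalid;    /* bumped by the MMU notifier on eviction */
};

static bool try_revalidate(struct userptr_mem *mem, bool racing_invalidation)
{
	unsigned int snapshot = mem->invalid;

	/* ... get_user_pages / hmm_range_fault would run here ... */
	if (racing_invalidation)
		mem->invalid++;              /* notifier fired concurrently */

	if (mem->invalid != snapshot)
		return false;                /* retry: pages went stale again */

	mem->invalid = 0;                    /* pages are usable now */
	return true;
}

int main(void)
{
	struct userptr_mem mem = { .invalid = 1 };

	printf("no race: %d\n", try_revalidate(&mem, false));
	mem.invalid = 1;
	printf("race:    %d\n", try_revalidate(&mem, true));
	return 0;
}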
2905 struct kgd_mem *mem; in amdgpu_amdkfd_gpuvm_restore_process_bos() local
2932 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2936 gobj = &mem->bo->tbo.base; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2949 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2952 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2953 uint32_t domain = mem->domain; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2993 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2997 list_for_each_entry(attachment, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
3001 kfd_mem_dmaunmap_attachment(mem, attachment); in amdgpu_amdkfd_gpuvm_restore_process_bos()
3002 ret = update_gpuvm_pte(mem, attachment, &sync_obj); in amdgpu_amdkfd_gpuvm_restore_process_bos()
3073 list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
3074 if (mem->bo->tbo.pin_count) in amdgpu_amdkfd_gpuvm_restore_process_bos()
3077 dma_resv_add_fence(mem->bo->tbo.base.resv, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3099 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem) in amdgpu_amdkfd_add_gws_to_process() argument
3108 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in amdgpu_amdkfd_add_gws_to_process()
3109 if (!*mem) in amdgpu_amdkfd_add_gws_to_process()
3112 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_add_gws_to_process()
3113 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_add_gws_to_process()
3114 (*mem)->bo = amdgpu_bo_ref(gws_bo); in amdgpu_amdkfd_add_gws_to_process()
3115 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; in amdgpu_amdkfd_add_gws_to_process()
3116 (*mem)->process_info = process_info; in amdgpu_amdkfd_add_gws_to_process()
3117 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false); in amdgpu_amdkfd_add_gws_to_process()
3118 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_add_gws_to_process()
3122 mutex_lock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3145 mutex_unlock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3153 mutex_unlock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3154 amdgpu_sync_free(&(*mem)->sync); in amdgpu_amdkfd_add_gws_to_process()
3155 remove_kgd_mem_from_kfd_bo_list(*mem, process_info); in amdgpu_amdkfd_add_gws_to_process()
3157 mutex_destroy(&(*mem)->lock); in amdgpu_amdkfd_add_gws_to_process()
3158 kfree(*mem); in amdgpu_amdkfd_add_gws_to_process()
3159 *mem = NULL; in amdgpu_amdkfd_add_gws_to_process()
3163 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) in amdgpu_amdkfd_remove_gws_from_process() argument
3167 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem; in amdgpu_amdkfd_remove_gws_from_process()
3187 kfree(mem); in amdgpu_amdkfd_remove_gws_from_process()
3211 bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem) in amdgpu_amdkfd_bo_mapped_to_dev() argument
3216 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_bo_mapped_to_dev()
3229 seq_printf(m, "System mem used %lldM out of %lluM\n", in kfd_debugfs_kfd_mem_limits()
3232 seq_printf(m, "TTM mem used %lldM out of %lluM\n", in kfd_debugfs_kfd_mem_limits()