Lines matching refs:mr. Each entry shows the source line number, the matching code, the enclosing function, and how mr is used there (argument, local, or member).

174 			 struct mlx5_ib_mr *mr, int flags)  in populate_mtt()  argument
176 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in populate_mtt()
190 struct mlx5_ib_mr *mr, int flags) in mlx5_odp_populate_xlt() argument
193 populate_klm(xlt, idx, nentries, mr, flags); in mlx5_odp_populate_xlt()
195 populate_mtt(xlt, idx, nentries, mr, flags); in mlx5_odp_populate_xlt()
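
The two populate helpers above are selected by mlx5_odp_populate_xlt() (lines 190-195) depending on whether the mkey's translation table holds KLM entries (an implicit-ODP parent pointing at child mkeys) or MTT entries (a regular ODP MR pointing at pages). A condensed sketch of that dispatch, assembled from the listed lines; the MLX5_IB_UPD_XLT_INDIRECT flag test is an assumption taken from the upstream driver rather than from this listing:

/*
 * Sketch: pick KLM vs. MTT population for an XLT update.
 * Only the two calls appear in the listing; the flag name is assumed.
 */
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
                           struct mlx5_ib_mr *mr, int flags)
{
        if (flags & MLX5_IB_UPD_XLT_INDIRECT)
                populate_klm(xlt, idx, nentries, mr, flags);  /* implicit parent */
        else
                populate_mtt(xlt, idx, nentries, mr, flags);  /* page-backed MR */
}
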
207 struct mlx5_ib_mr *mr = in free_implicit_child_mr_work() local
209 struct mlx5_ib_mr *imr = mr->parent; in free_implicit_child_mr_work()
211 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in free_implicit_child_mr_work()
213 mlx5r_deref_wait_odp_mkey(&mr->mmkey); in free_implicit_child_mr_work()
216 mlx5r_umr_update_xlt(mr->parent, in free_implicit_child_mr_work()
220 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in free_implicit_child_mr_work()
225 static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr) in destroy_unused_implicit_child_mr() argument
227 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in destroy_unused_implicit_child_mr()
229 struct mlx5_ib_mr *imr = mr->parent; in destroy_unused_implicit_child_mr()
242 if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) != in destroy_unused_implicit_child_mr()
243 mr) { in destroy_unused_implicit_child_mr()
249 if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault)) in destroy_unused_implicit_child_mr()
250 __xa_erase(&mr_to_mdev(mr)->odp_mkeys, in destroy_unused_implicit_child_mr()
251 mlx5_base_mkey(mr->mmkey.key)); in destroy_unused_implicit_child_mr()
255 INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work); in destroy_unused_implicit_child_mr()
256 queue_work(system_unbound_wq, &mr->odp_destroy.work); in destroy_unused_implicit_child_mr()
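
Lines 207-256 implement the teardown of an implicit child MR that no longer maps any pages. destroy_unused_implicit_child_mr() atomically unlinks the child from the parent's implicit_children xarray (and, on devices reporting the mem_page_fault capability, from odp_mkeys), then defers the sleeping part to a workqueue; free_implicit_child_mr_work() waits for outstanding mkey users, rewrites the parent's translation entry, and deregisters the child. A condensed sketch, with the index derivation, the XLT flags, and the parent-reference and mutex handling taken as assumptions from the upstream driver:

/* Deferred part: runs from system_unbound_wq, may sleep. */
static void free_implicit_child_mr_work(struct work_struct *work)
{
        struct mlx5_ib_mr *mr =
                container_of(work, struct mlx5_ib_mr, odp_destroy.work);
        struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);

        /* Wait until no fault/prefetch path still holds the child's mkey. */
        mlx5r_deref_wait_odp_mkey(&mr->mmkey);

        /* Rewrite the parent's single KLM entry covering this child
         * (index and flags assumed; umem_mutex locking elided). */
        mlx5r_umr_update_xlt(mr->parent,
                             ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,
                             MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);

        mlx5_ib_dereg_mr(&mr->ibmr, NULL);
}

static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
{
        struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
        struct mlx5_ib_mr *imr = mr->parent;
        unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT; /* assumed */

        xa_lock(&imr->implicit_children);
        /* Only the thread that wins this exchange owns the teardown. */
        if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL,
                         GFP_KERNEL) != mr) {
                xa_unlock(&imr->implicit_children);
                return;
        }

        if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
                __xa_erase(&mr_to_mdev(mr)->odp_mkeys,
                           mlx5_base_mkey(mr->mmkey.key));
        xa_unlock(&imr->implicit_children);

        /* Deregistering an MR sleeps, so it cannot run from the
         * invalidation path; bounce it to a workqueue instead. */
        INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
        queue_work(system_unbound_wq, &mr->odp_destroy.work);
}
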
265 struct mlx5_ib_mr *mr; in mlx5_ib_invalidate_range() local
285 mr = umem_odp->private; in mlx5_ib_invalidate_range()
286 if (!mr) in mlx5_ib_invalidate_range()
316 mlx5r_umr_update_xlt(mr, blk_start_idx, in mlx5_ib_invalidate_range()
327 mlx5r_umr_update_xlt(mr, blk_start_idx, in mlx5_ib_invalidate_range()
335 mlx5_update_odp_stats_with_handled(mr, invalidations, invalidations); in mlx5_ib_invalidate_range()
345 if (unlikely(!umem_odp->npages && mr->parent)) in mlx5_ib_invalidate_range()
346 destroy_unused_implicit_child_mr(mr); in mlx5_ib_invalidate_range()
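
mlx5_ib_invalidate_range() (lines 265-346) is the MMU-interval-notifier callback. It zaps the device's translation entries for the invalidated range, batching contiguous runs of mapped entries into as few mlx5r_umr_update_xlt() calls as possible, updates the invalidation statistics, and, once the umem has no pages left, retires an implicit child MR via destroy_unused_implicit_child_mr(). A simplified sketch of the batching loop; entry_is_mapped() is a hypothetical stand-in for the driver's per-entry check, and UMR block alignment is elided:

/* Simplified: zap translation entries for [start, end) in large runs. */
static void zap_range_sketch(struct mlx5_ib_mr *mr,
                             struct ib_umem_odp *umem_odp, u64 start, u64 end)
{
        u64 idx = 0, blk_start_idx = 0, addr;
        bool in_block = false;

        for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
                idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;

                if (entry_is_mapped(umem_odp, idx)) {   /* hypothetical helper */
                        if (!in_block) {
                                blk_start_idx = idx;    /* open a new run */
                                in_block = true;
                        }
                } else if (in_block) {
                        /* close the run with one atomic zap (line 316) */
                        mlx5r_umr_update_xlt(mr, blk_start_idx,
                                             idx - blk_start_idx, 0,
                                             MLX5_IB_UPD_XLT_ZAP |
                                             MLX5_IB_UPD_XLT_ATOMIC);
                        in_block = false;
                }
        }
        if (in_block)   /* flush the trailing run (line 327) */
                mlx5r_umr_update_xlt(mr, blk_start_idx,
                                     idx - blk_start_idx + 1, 0,
                                     MLX5_IB_UPD_XLT_ZAP |
                                     MLX5_IB_UPD_XLT_ATOMIC);
}

After the zap the real callback unmaps the DMA pages; only when umem_odp->npages drops to zero and the MR has a parent (lines 345-346) is the child queued for destruction.
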
463 struct mlx5_ib_mr *mr; in implicit_get_child_mr() local
473 mr = mlx5_mr_cache_alloc(dev, imr->access_flags, in implicit_get_child_mr()
476 if (IS_ERR(mr)) { in implicit_get_child_mr()
478 return mr; in implicit_get_child_mr()
481 mr->access_flags = imr->access_flags; in implicit_get_child_mr()
482 mr->ibmr.pd = imr->ibmr.pd; in implicit_get_child_mr()
483 mr->ibmr.device = &mr_to_mdev(imr)->ib_dev; in implicit_get_child_mr()
484 mr->umem = &odp->umem; in implicit_get_child_mr()
485 mr->ibmr.lkey = mr->mmkey.key; in implicit_get_child_mr()
486 mr->ibmr.rkey = mr->mmkey.key; in implicit_get_child_mr()
487 mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE; in implicit_get_child_mr()
488 mr->parent = imr; in implicit_get_child_mr()
489 odp->private = mr; in implicit_get_child_mr()
495 refcount_set(&mr->mmkey.usecount, 2); in implicit_get_child_mr()
497 err = mlx5r_umr_update_xlt(mr, 0, in implicit_get_child_mr()
508 ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr, in implicit_get_child_mr()
524 ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key), in implicit_get_child_mr()
525 &mr->mmkey, GFP_KERNEL); in implicit_get_child_mr()
531 mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD; in implicit_get_child_mr()
534 mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr); in implicit_get_child_mr()
535 return mr; in implicit_get_child_mr()
540 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in implicit_get_child_mr()
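
implicit_get_child_mr() (lines 463-540) creates one MLX5_IMR_MTT_SIZE-sized child under an implicit ODP parent: a cached mkey is taken from the MR cache, initialized from the parent (PD, device, access flags, iova = idx * MLX5_IMR_MTT_SIZE), given two references (one owned by the parent's xarray, one returned to the caller), programmed with an initially zapped translation table, and then published in imr->implicit_children with __xa_cmpxchg so that a racing fault thread that got there first wins. A condensed sketch; the child-umem allocation, the cache-alloc and update-xlt arguments, and most error unwinding are assumptions based on the upstream driver, and the xa_lock normally held around the __xa_* calls is only noted in comments:

static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
                                                unsigned long idx)
{
        struct mlx5_ib_dev *dev = mr_to_mdev(imr);
        struct ib_umem_odp *odp;
        struct mlx5_ib_mr *mr, *ret;
        int err;

        /* Child umem covering idx * MLX5_IMR_MTT_SIZE (assumed). */
        odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
                                      idx * MLX5_IMR_MTT_SIZE,
                                      MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
        if (IS_ERR(odp))
                return ERR_CAST(odp);

        /* Cached mkey (line 473); trailing arguments are assumed. */
        mr = mlx5_mr_cache_alloc(dev, imr->access_flags,
                                 MLX5_MKC_ACCESS_MODE_MTT,
                                 MLX5_IMR_MTT_ENTRIES);
        if (IS_ERR(mr)) {
                ib_umem_odp_release(odp);
                return mr;
        }

        /* Inherit PD, device, lkey/rkey, iova, parent, odp->private
         * from the parent: lines 481-489 above, verbatim. */

        /* One reference for the xarray, one for the caller (line 495). */
        refcount_set(&mr->mmkey.usecount, 2);

        /* Enable the mkey with an all-zapped table (line 497; flags assumed). */
        err = mlx5r_umr_update_xlt(mr, 0, MLX5_IMR_MTT_ENTRIES, PAGE_SHIFT,
                                   MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ENABLE);
        if (err) {
                ret = ERR_PTR(err);
                goto out_mr;
        }

        /* Publish; xa_lock(&imr->implicit_children) is held here in the driver. */
        ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr, GFP_KERNEL);
        if (unlikely(ret)) {
                if (xa_is_err(ret))
                        ret = ERR_PTR(xa_err(ret));
                else
                        refcount_inc(&ret->mmkey.usecount); /* reuse the winner's child */
                goto out_mr;
        }

        /* Memory-scheme faults also look the child up by mkey (lines 524-531). */
        if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
                __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
                           &mr->mmkey, GFP_KERNEL);
                mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
        }

        mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
        return mr;

out_mr:
        mlx5_ib_dereg_mr(&mr->ibmr, NULL);
        return ret;
}
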
658 void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr) in mlx5_ib_free_odp_mr() argument
667 xa_for_each(&mr->implicit_children, idx, mtt) { in mlx5_ib_free_odp_mr()
668 xa_erase(&mr->implicit_children, idx); in mlx5_ib_free_odp_mr()
672 if (mr->null_mmkey.key) { in mlx5_ib_free_odp_mr()
673 xa_erase(&mr_to_mdev(mr)->odp_mkeys, in mlx5_ib_free_odp_mr()
674 mlx5_base_mkey(mr->null_mmkey.key)); in mlx5_ib_free_odp_mr()
676 mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev, in mlx5_ib_free_odp_mr()
677 mr->null_mmkey.key); in mlx5_ib_free_odp_mr()
684 static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp, in pagefault_real_mr() argument
713 ret = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift, xlt_flags); in pagefault_real_mr()
718 mlx5_ib_err(mr_to_mdev(mr), in pagefault_real_mr()
822 static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt, in pagefault_dmabuf_mr() argument
825 struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem); in pagefault_dmabuf_mr()
845 if (mr->data_direct) in pagefault_dmabuf_mr()
846 err = mlx5r_umr_update_data_direct_ksm_pas(mr, xlt_flags); in pagefault_dmabuf_mr()
848 err = mlx5r_umr_update_mr_pas(mr, xlt_flags); in pagefault_dmabuf_mr()
858 return ib_umem_num_pages(mr->umem); in pagefault_dmabuf_mr()
870 static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, in pagefault_mr() argument
873 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in pagefault_mr()
875 if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault) in pagefault_mr()
878 if (mr->umem->is_dmabuf) in pagefault_mr()
879 return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags); in pagefault_mr()
882 u64 offset = io_virt < mr->ibmr.iova ? 0 : io_virt - mr->ibmr.iova; in pagefault_mr()
897 return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped, in pagefault_mr()
900 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped, in pagefault_mr()
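
pagefault_mr() (lines 870-900) is the common entry point for resolving a fault against a single MR: dmabuf-backed MRs are handed to pagefault_dmabuf_mr(), regular ODP MRs have the faulting io_virt translated into a user VA, clamped or rejected depending on permissive_fault, and passed to pagefault_real_mr(), and implicit ODP parents go to pagefault_implicit_mr(). A condensed sketch of that dispatch; the is_implicit_odp test and the overflow/bounds checks are assumptions based on the upstream driver:

static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
                        u32 *bytes_mapped, u32 flags, bool permissive_fault)
{
        struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);

        if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault)
                return -EFAULT;

        if (mr->umem->is_dmabuf)
                return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);

        if (!odp->is_implicit_odp) {
                u64 offset = io_virt < mr->ibmr.iova ? 0 : io_virt - mr->ibmr.iova;
                u64 user_va;

                if (check_add_overflow(offset, (u64)odp->umem.address, &user_va))
                        return -EFAULT;

                if (permissive_fault) {
                        /* clamp the request to the umem boundaries */
                        if (user_va < ib_umem_start(odp))
                                user_va = ib_umem_start(odp);
                        if (user_va + bcnt > ib_umem_end(odp))
                                bcnt = ib_umem_end(odp) - user_va;
                } else if (unlikely(user_va >= ib_umem_end(odp) ||
                                    ib_umem_end(odp) - user_va < bcnt)) {
                        return -EFAULT;
                }
                return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
                                         flags);
        }
        return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
                                     flags);
}
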
904 int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr) in mlx5_ib_init_odp_mr() argument
908 ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address, in mlx5_ib_init_odp_mr()
909 mr->umem->length, NULL, in mlx5_ib_init_odp_mr()
914 int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr) in mlx5_ib_init_dmabuf_mr() argument
918 ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL, in mlx5_ib_init_dmabuf_mr()
982 struct mlx5_ib_mr *mr; in pagefault_single_data_segment() local
1012 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); in pagefault_single_data_segment()
1017 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false); in pagefault_single_data_segment()
1021 mlx5_update_odp_stats_with_handled(mr, faults, ret); in pagefault_single_data_segment()
1519 struct mlx5_ib_mr *mr, *child_mr; in mlx5_ib_mr_memory_pfault_handler() local
1529 mr = child_mr->parent; in mlx5_ib_mr_memory_pfault_handler()
1532 mr = container_of(mmkey, struct mlx5_ib_mr, null_mmkey); in mlx5_ib_mr_memory_pfault_handler()
1535 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); in mlx5_ib_mr_memory_pfault_handler()
1540 ret = pagefault_mr(mr, prefetch_va, prefetch_size, NULL, 0, true); in mlx5_ib_mr_memory_pfault_handler()
1542 ret = pagefault_mr(mr, pfault->memory.va, in mlx5_ib_mr_memory_pfault_handler()
1549 mlx5_update_odp_stats_with_handled(mr, faults, ret); in mlx5_ib_mr_memory_pfault_handler()
1908 struct mlx5_ib_mr *mr; member
1918 mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey); in destroy_prefetch_work()
1928 struct mlx5_ib_mr *mr = NULL; in get_prefetchable_mr() local
1934 mr = ERR_PTR(-ENOENT); in get_prefetchable_mr()
1938 mr = ERR_PTR(-EINVAL); in get_prefetchable_mr()
1942 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); in get_prefetchable_mr()
1944 if (mr->ibmr.pd != pd) { in get_prefetchable_mr()
1945 mr = ERR_PTR(-EPERM); in get_prefetchable_mr()
1951 !mr->umem->writable) { in get_prefetchable_mr()
1952 mr = ERR_PTR(-EPERM); in get_prefetchable_mr()
1959 return mr; in get_prefetchable_mr()
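
get_prefetchable_mr() (lines 1928-1959) resolves an lkey to an MR that may be prefetched on behalf of the caller: the mkey must exist in dev->odp_mkeys, be a plain MR, belong to the caller's PD, and a write prefetch additionally requires a writable umem; on success a reference is taken on the mkey, which the prefetch paths later drop with mlx5r_deref_odp_mkey(). A condensed sketch; the xa_lock scope and the mkey-type and advice checks are assumptions based on the upstream driver:

static struct mlx5_ib_mr *
get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
                    u32 lkey)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr = NULL;
        struct mlx5_ib_mkey *mmkey;

        xa_lock(&dev->odp_mkeys);
        mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
        if (!mmkey || mmkey->key != lkey) {
                mr = ERR_PTR(-ENOENT);          /* line 1934: unknown lkey */
                goto end;
        }
        if (mmkey->type != MLX5_MKEY_MR) {
                mr = ERR_PTR(-EINVAL);          /* line 1938: not a plain MR */
                goto end;
        }

        mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

        if (mr->ibmr.pd != pd) {
                mr = ERR_PTR(-EPERM);           /* line 1945: wrong PD */
                goto end;
        }

        /* write prefetch needs a writable umem (lines 1951-1952) */
        if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
            !mr->umem->writable) {
                mr = ERR_PTR(-EPERM);
                goto end;
        }

        refcount_inc(&mmkey->usecount);
end:
        xa_unlock(&dev->odp_mkeys);
        return mr;
}
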
1973 ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt, in mlx5_ib_prefetch_mr_work()
1978 mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret); in mlx5_ib_prefetch_mr_work()
1995 struct mlx5_ib_mr *mr; in init_prefetch_work() local
1997 mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); in init_prefetch_work()
1998 if (IS_ERR(mr)) { in init_prefetch_work()
2000 return PTR_ERR(mr); in init_prefetch_work()
2004 work->frags[i].mr = mr; in init_prefetch_work()
2020 struct mlx5_ib_mr *mr; in mlx5_ib_prefetch_sg_list() local
2022 mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); in mlx5_ib_prefetch_sg_list()
2023 if (IS_ERR(mr)) in mlx5_ib_prefetch_sg_list()
2024 return PTR_ERR(mr); in mlx5_ib_prefetch_sg_list()
2025 ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length, in mlx5_ib_prefetch_sg_list()
2028 mlx5r_deref_odp_mkey(&mr->mmkey); in mlx5_ib_prefetch_sg_list()
2031 mlx5_update_odp_stats(mr, prefetch, ret); in mlx5_ib_prefetch_sg_list()
2032 mlx5r_deref_odp_mkey(&mr->mmkey); in mlx5_ib_prefetch_sg_list()
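
The synchronous prefetch path (lines 2020-2032) walks the SG list, resolving each lkey with get_prefetchable_mr(), faulting the range with pagefault_mr(), updating the prefetch counter, and dropping the mkey reference whether or not the fault succeeded; the deferred variant (lines 1973-1978) does the same from a work item against the MRs captured in init_prefetch_work(). A condensed sketch of the synchronous loop; the function signature and the bytes_mapped handling are assumptions based on the upstream driver:

static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
                                    enum ib_uverbs_advise_mr_advice advice,
                                    u32 pf_flags, struct ib_sge *sg_list,
                                    u32 num_sge)
{
        u32 bytes_mapped = 0;
        int ret = 0;
        u32 i;

        for (i = 0; i < num_sge; ++i) {
                struct mlx5_ib_mr *mr;

                mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
                if (IS_ERR(mr))
                        return PTR_ERR(mr);
                ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
                                   &bytes_mapped, pf_flags, false);
                if (ret < 0) {
                        /* drop the reference taken by get_prefetchable_mr() */
                        mlx5r_deref_odp_mkey(&mr->mmkey);
                        return ret;
                }
                mlx5_update_odp_stats(mr, prefetch, ret);
                mlx5r_deref_odp_mkey(&mr->mmkey);
        }

        return 0;
}
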