Lines matching refs:mr — references to the identifier mr in the mlx5_ib memory-region code. Each entry gives the line number in the source file, the matching source line, and the enclosing function; a trailing "argument" or "local" marks the line where mr is declared in that function rather than merely used.
130 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in destroy_mkey() argument
132 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
134 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); in destroy_mkey()
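Lines 130-134 are nearly the whole helper; condensed from the two fragments above (a reading of this listing, not the verbatim kernel source), it checks that the mkey has already been unhooked from ODP lookup before the hardware object is destroyed:

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        /* the mkey must no longer be reachable through the ODP xarray */
        WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
        return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}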
724 struct mlx5_ib_mr *mr; in _mlx5_mr_cache_alloc() local
727 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in _mlx5_mr_cache_alloc()
728 if (!mr) in _mlx5_mr_cache_alloc()
738 err = create_cache_mkey(ent, &mr->mmkey.key); in _mlx5_mr_cache_alloc()
743 kfree(mr); in _mlx5_mr_cache_alloc()
747 mr->mmkey.key = pop_mkey_locked(ent); in _mlx5_mr_cache_alloc()
751 mr->mmkey.cache_ent = ent; in _mlx5_mr_cache_alloc()
752 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_mr_cache_alloc()
753 mr->mmkey.rb_key = ent->rb_key; in _mlx5_mr_cache_alloc()
754 mr->mmkey.cacheable = true; in _mlx5_mr_cache_alloc()
755 init_waitqueue_head(&mr->mmkey.wait); in _mlx5_mr_cache_alloc()
756 return mr; in _mlx5_mr_cache_alloc()
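Lines 724-756 outline _mlx5_mr_cache_alloc(): allocate the struct, obtain an mkey either by creating a fresh one for the cache entry or by popping a cached one, then tag mr->mmkey so the key can be returned to the cache at deregistration. A condensed sketch; the queue-emptiness test, the locking around it, and the exact error values are not visible in this listing and are assumed:

        struct mlx5_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);                /* error value assumed */

        if (cache_entry_is_empty) {                     /* placeholder for the real queue check */
                err = create_cache_mkey(ent, &mr->mmkey.key);   /* mint a new mkey */
                if (err) {
                        kfree(mr);
                        return ERR_PTR(err);
                }
        } else {
                mr->mmkey.key = pop_mkey_locked(ent);   /* reuse a cached mkey */
        }

        mr->mmkey.cache_ent = ent;                      /* remember the owning cache entry */
        mr->mmkey.type = MLX5_MKEY_MR;
        mr->mmkey.rb_key = ent->rb_key;
        mr->mmkey.cacheable = true;                     /* may be parked back in the cache */
        init_waitqueue_head(&mr->mmkey.wait);
        return mr;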
1037 struct mlx5_ib_mr *mr; in mlx5_ib_get_dma_mr() local
1042 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dma_mr()
1043 if (!mr) in mlx5_ib_get_dma_mr()
1060 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dma_mr()
1065 mr->mmkey.type = MLX5_MKEY_MR; in mlx5_ib_get_dma_mr()
1066 mr->ibmr.lkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1067 mr->ibmr.rkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1068 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1070 return &mr->ibmr; in mlx5_ib_get_dma_mr()
1076 kfree(mr); in mlx5_ib_get_dma_mr()
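Lines 1037-1076 show the DMA MR path: there is no user memory behind the key (mr->umem stays NULL), the mkey is created directly, and the same key value is exposed as both lkey and rkey. Sketch of the visible skeleton; the mkey-context setup and the error-label names are assumed:

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);                /* error value assumed */

        /* ... build the CREATE_MKEY command for a whole-address-space key ... */
        err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
        if (err)
                goto err_free;                          /* label name assumed */

        mr->mmkey.type = MLX5_MKEY_MR;
        mr->ibmr.lkey = mr->mmkey.key;                  /* lkey and rkey are the same key */
        mr->ibmr.rkey = mr->mmkey.key;
        mr->umem = NULL;                                /* no backing umem for a DMA MR */
        return &mr->ibmr;

err_free:
        kfree(mr);
        return ERR_PTR(err);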
1099 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, in set_mr_fields() argument
1102 mr->ibmr.lkey = mr->mmkey.key; in set_mr_fields()
1103 mr->ibmr.rkey = mr->mmkey.key; in set_mr_fields()
1104 mr->ibmr.length = length; in set_mr_fields()
1105 mr->ibmr.device = &dev->ib_dev; in set_mr_fields()
1106 mr->ibmr.iova = iova; in set_mr_fields()
1107 mr->access_flags = access_flags; in set_mr_fields()
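Lines 1099-1107 are essentially the whole helper. Reassembled below; the parameter types after the first two are assumptions inferred from the call sites at lines 1165, 1214, 1316 and 1363:

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                          u64 length, int access_flags, u64 iova)
{
        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;
        mr->ibmr.length = length;
        mr->ibmr.device = &dev->ib_dev;
        mr->ibmr.iova = iova;
        mr->access_flags = access_flags;
}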
1128 struct mlx5_ib_mr *mr; in alloc_cacheable_mr() local
1149 mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode); in alloc_cacheable_mr()
1151 if (IS_ERR(mr)) in alloc_cacheable_mr()
1152 return mr; in alloc_cacheable_mr()
1153 mr->mmkey.rb_key = rb_key; in alloc_cacheable_mr()
1154 mr->mmkey.cacheable = true; in alloc_cacheable_mr()
1155 return mr; in alloc_cacheable_mr()
1158 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags); in alloc_cacheable_mr()
1159 if (IS_ERR(mr)) in alloc_cacheable_mr()
1160 return mr; in alloc_cacheable_mr()
1162 mr->ibmr.pd = pd; in alloc_cacheable_mr()
1163 mr->umem = umem; in alloc_cacheable_mr()
1164 mr->page_shift = order_base_2(page_size); in alloc_cacheable_mr()
1165 set_mr_fields(dev, mr, umem->length, access_flags, iova); in alloc_cacheable_mr()
1167 return mr; in alloc_cacheable_mr()
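Lines 1128-1167 show the two ways out of alloc_cacheable_mr(): when no suitable cache entry is available, the MR is created directly with reg_create() and only marked cacheable (rb_key / cacheable) so its mkey can still be parked in the cache at deregistration; otherwise the mkey comes from the cache entry and the per-MR fields are filled in afterwards. Sketch; the entry-lookup condition is a placeholder:

        if (no_suitable_cache_entry) {                  /* placeholder for the real lookup */
                mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode);
                if (IS_ERR(mr))
                        return mr;
                mr->mmkey.rb_key = rb_key;              /* lets dereg cache this mkey anyway */
                mr->mmkey.cacheable = true;
                return mr;
        }

        mr = _mlx5_mr_cache_alloc(dev, ent, access_flags);
        if (IS_ERR(mr))
                return mr;

        mr->ibmr.pd = pd;
        mr->umem = umem;
        mr->page_shift = order_base_2(page_size);
        set_mr_fields(dev, mr, umem->length, access_flags, iova);
        return mr;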
1176 struct mlx5_ib_mr *mr; in reg_create_crossing_vhca_mr() local
1185 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in reg_create_crossing_vhca_mr()
1186 if (!mr) in reg_create_crossing_vhca_mr()
1209 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create_crossing_vhca_mr()
1213 mr->mmkey.type = MLX5_MKEY_MR; in reg_create_crossing_vhca_mr()
1214 set_mr_fields(dev, mr, length, access_flags, iova); in reg_create_crossing_vhca_mr()
1215 mr->ibmr.pd = pd; in reg_create_crossing_vhca_mr()
1217 mlx5_ib_dbg(dev, "crossing mkey = 0x%x\n", mr->mmkey.key); in reg_create_crossing_vhca_mr()
1219 return &mr->ibmr; in reg_create_crossing_vhca_mr()
1223 kfree(mr); in reg_create_crossing_vhca_mr()
1237 struct mlx5_ib_mr *mr; in reg_create() local
1249 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in reg_create()
1250 if (!mr) in reg_create()
1253 mr->ibmr.pd = pd; in reg_create()
1254 mr->access_flags = access_flags; in reg_create()
1255 mr->page_shift = order_base_2(page_size); in reg_create()
1272 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, in reg_create()
1296 get_octo_len(iova, umem->length, mr->page_shift) * 2); in reg_create()
1299 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1300 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); in reg_create()
1305 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1308 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create()
1313 mr->mmkey.type = MLX5_MKEY_MR; in reg_create()
1314 mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift); in reg_create()
1315 mr->umem = umem; in reg_create()
1316 set_mr_fields(dev, mr, umem->length, access_flags, iova); in reg_create()
1319 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); in reg_create()
1321 return mr; in reg_create()
1326 kfree(mr); in reg_create()
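Lines 1237-1326 sketch the non-cached registration path: allocate the MR, record pd/access/page_shift, build the CREATE_MKEY command (translation-table sizes from get_octo_len(), log_page_size from mr->page_shift, the PAS array optionally written inline with mlx5_ib_populate_pas()), create the mkey, then record ndescs, the umem and the generic fields. Heavily condensed; the command-buffer allocation, most MLX5_SET() fields, and the arguments cut off in the listing are omitted or assumed:

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);                /* error value assumed */

        mr->ibmr.pd = pd;
        mr->access_flags = access_flags;
        mr->page_shift = order_base_2(page_size);

        /* ... allocate and fill the CREATE_MKEY input ... */
        if (populate)                                   /* inline-PAS case, name assumed */
                mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
                                     0 /* trailing flags cut off in the listing */);
        /* translation sizes come from get_octo_len(iova, umem->length, mr->page_shift) */
        MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);

        err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
        if (err)
                goto err_free;                          /* label name assumed */

        mr->mmkey.type = MLX5_MKEY_MR;
        mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift);
        mr->umem = umem;
        set_mr_fields(dev, mr, umem->length, access_flags, iova);
        mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
        return mr;

err_free:
        kfree(mr);
        return ERR_PTR(err);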
1335 struct mlx5_ib_mr *mr; in mlx5_ib_get_dm_mr() local
1340 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dm_mr()
1341 if (!mr) in mlx5_ib_get_dm_mr()
1357 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dm_mr()
1363 set_mr_fields(dev, mr, length, acc, start_addr); in mlx5_ib_get_dm_mr()
1365 return &mr->ibmr; in mlx5_ib_get_dm_mr()
1371 kfree(mr); in mlx5_ib_get_dm_mr()
1430 struct mlx5_ib_mr *mr = NULL; in create_real_mr() local
1436 mr = alloc_cacheable_mr(pd, umem, iova, access_flags, in create_real_mr()
1443 mr = reg_create(pd, umem, iova, access_flags, page_size, in create_real_mr()
1447 if (IS_ERR(mr)) { in create_real_mr()
1449 return ERR_CAST(mr); in create_real_mr()
1452 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in create_real_mr()
1462 err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE); in create_real_mr()
1464 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_real_mr()
1468 return &mr->ibmr; in create_real_mr()
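Lines 1430-1468 show the ordinary user-MR path in create_real_mr(): go through the mkey cache when UMR can be used, fall back to a direct reg_create() otherwise, and for a cache-sourced (still disabled) mkey program its translation entries with a UMR (MLX5_IB_UPD_XLT_ENABLE), tearing the MR back down if that fails. Sketch; both branch conditions, the trailing call arguments cut off in the listing, and the umem cleanup on error are assumptions, since those lines do not mention mr and are absent from this listing:

        if (can_use_umr)                                /* placeholder condition */
                mr = alloc_cacheable_mr(pd, umem, iova, access_flags, access_mode);
        else
                mr = reg_create(pd, umem, iova, access_flags, page_size, true, access_mode);
        if (IS_ERR(mr)) {
                ib_umem_release(umem);                  /* assumed cleanup */
                return ERR_CAST(mr);
        }

        mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

        if (mkey_not_yet_enabled) {                     /* placeholder: cache-sourced mkeys */
                err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
                if (err) {
                        mlx5_ib_dereg_mr(&mr->ibmr, NULL);
                        return ERR_PTR(err);
                }
        }
        return &mr->ibmr;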
1477 struct mlx5_ib_mr *mr; in create_user_odp_mr() local
1492 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); in create_user_odp_mr()
1493 if (IS_ERR(mr)) in create_user_odp_mr()
1494 return ERR_CAST(mr); in create_user_odp_mr()
1495 return &mr->ibmr; in create_user_odp_mr()
1507 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags, in create_user_odp_mr()
1509 if (IS_ERR(mr)) { in create_user_odp_mr()
1511 return ERR_CAST(mr); in create_user_odp_mr()
1513 xa_init(&mr->implicit_children); in create_user_odp_mr()
1515 odp->private = mr; in create_user_odp_mr()
1516 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in create_user_odp_mr()
1520 err = mlx5_ib_init_odp_mr(mr); in create_user_odp_mr()
1523 return &mr->ibmr; in create_user_odp_mr()
1526 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_user_odp_mr()
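Lines 1477-1526 cover on-demand-paging registration. A whole-address-space request takes the implicit-ODP path; otherwise the ODP umem (obtained earlier in the function, not visible in this listing) is registered through the mkey cache, linked back to the MR, published in the odp_mkeys xarray, and given its initial translation table. Sketch; the implicit-ODP check, the trailing call arguments cut off in the listing, and the error-path names are assumptions:

        if (is_implicit_odp_request) {                  /* placeholder for the real check */
                mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
                if (IS_ERR(mr))
                        return ERR_CAST(mr);
                return &mr->ibmr;
        }

        mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags, access_mode);
        if (IS_ERR(mr)) {
                ib_umem_release(&odp->umem);            /* assumed cleanup */
                return ERR_CAST(mr);
        }

        xa_init(&mr->implicit_children);
        odp->private = mr;                              /* back-pointer used by the fault path */

        err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
        if (err)
                goto err_dereg_mr;                      /* label name assumed */

        err = mlx5_ib_init_odp_mr(mr);                  /* set up the initial translation table */
        if (err)
                goto err_dereg_mr;
        return &mr->ibmr;

err_dereg_mr:
        mlx5_ib_dereg_mr(&mr->ibmr, NULL);
        return ERR_PTR(err);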
1560 struct mlx5_ib_mr *mr = umem_dmabuf->private; in mlx5_ib_dmabuf_invalidate_cb() local
1564 if (!umem_dmabuf->sgt || !mr) in mlx5_ib_dmabuf_invalidate_cb()
1567 mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP); in mlx5_ib_dmabuf_invalidate_cb()
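Lines 1560-1567 are the dma-buf move/invalidate callback: if the MR and its sg table are still live, its translation entries are zapped through a UMR (MLX5_IB_UPD_XLT_ZAP) so the HCA stops referencing pages the exporter is about to move. Sketch; the unmapping of the dma-buf pages that follows in the full callback is not visible in this listing:

        struct mlx5_ib_mr *mr = umem_dmabuf->private;

        /* nothing mapped yet, or the MR is already being torn down */
        if (!umem_dmabuf->sgt || !mr)
                return;

        mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);       /* point the mkey at nothing */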
1583 struct mlx5_ib_mr *mr = NULL; in reg_user_mr_dmabuf() local
1607 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, in reg_user_mr_dmabuf()
1609 if (IS_ERR(mr)) { in reg_user_mr_dmabuf()
1611 return ERR_CAST(mr); in reg_user_mr_dmabuf()
1614 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in reg_user_mr_dmabuf()
1616 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); in reg_user_mr_dmabuf()
1617 umem_dmabuf->private = mr; in reg_user_mr_dmabuf()
1619 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in reg_user_mr_dmabuf()
1623 mr->data_direct = true; in reg_user_mr_dmabuf()
1626 err = mlx5_ib_init_dmabuf_mr(mr); in reg_user_mr_dmabuf()
1629 return &mr->ibmr; in reg_user_mr_dmabuf()
1632 __mlx5_ib_dereg_mr(&mr->ibmr); in reg_user_mr_dmabuf()
1745 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr, in can_use_umr_rereg_pas() argument
1750 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in can_use_umr_rereg_pas()
1753 if (!mr->mmkey.cache_ent) in can_use_umr_rereg_pas()
1761 return (mr->mmkey.cache_ent->rb_key.ndescs) >= in can_use_umr_rereg_pas()
1765 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, in umr_rereg_pas() argument
1769 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in umr_rereg_pas()
1771 struct ib_umem *old_umem = mr->umem; in umr_rereg_pas()
1779 err = mlx5r_umr_revoke_mr(mr); in umr_rereg_pas()
1784 mr->ibmr.pd = pd; in umr_rereg_pas()
1788 mr->access_flags = access_flags; in umr_rereg_pas()
1792 mr->ibmr.iova = iova; in umr_rereg_pas()
1793 mr->ibmr.length = new_umem->length; in umr_rereg_pas()
1794 mr->page_shift = order_base_2(page_size); in umr_rereg_pas()
1795 mr->umem = new_umem; in umr_rereg_pas()
1796 err = mlx5r_umr_update_mr_pas(mr, upd_flags); in umr_rereg_pas()
1802 mr->umem = old_umem; in umr_rereg_pas()
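Lines 1765-1802 outline umr_rereg_pas(): revoke the existing mapping, repoint the MR at the new PD, access flags, iova, length and umem, rewrite the translation entries, and restore the old umem pointer if that rewrite fails so the caller can clean up sanely. Sketch; the IB_MR_REREG_* flag checks that gate the individual assignments are omitted:

        struct ib_umem *old_umem = mr->umem;
        int err;

        err = mlx5r_umr_revoke_mr(mr);                  /* detach the mkey from the old pages */
        if (err)
                return err;

        mr->ibmr.pd = pd;
        mr->access_flags = access_flags;
        mr->ibmr.iova = iova;
        mr->ibmr.length = new_umem->length;
        mr->page_shift = order_base_2(page_size);
        mr->umem = new_umem;

        err = mlx5r_umr_update_mr_pas(mr, upd_flags);
        if (err) {
                mr->umem = old_umem;                    /* roll back so dereg frees the right umem */
                return err;
        }
        return 0;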
1818 struct mlx5_ib_mr *mr = to_mmr(ib_mr); in mlx5_ib_rereg_user_mr() local
1821 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct) in mlx5_ib_rereg_user_mr()
1833 new_access_flags = mr->access_flags; in mlx5_ib_rereg_user_mr()
1841 if (can_use_umr_rereg_access(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1843 err = mlx5r_umr_rereg_pd_access(mr, new_pd, in mlx5_ib_rereg_user_mr()
1850 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1857 err = mlx5r_umr_revoke_mr(mr); in mlx5_ib_rereg_user_mr()
1860 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1861 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1864 return create_real_mr(new_pd, umem, mr->ibmr.iova, in mlx5_ib_rereg_user_mr()
1872 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1876 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { in mlx5_ib_rereg_user_mr()
1886 if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova, in mlx5_ib_rereg_user_mr()
1888 err = umr_rereg_pas(mr, new_pd, new_access_flags, flags, in mlx5_ib_rereg_user_mr()
1910 struct mlx5_ib_mr *mr, in mlx5_alloc_priv_descs() argument
1927 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx5_alloc_priv_descs()
1928 if (!mr->descs_alloc) in mlx5_alloc_priv_descs()
1931 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); in mlx5_alloc_priv_descs()
1933 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); in mlx5_alloc_priv_descs()
1934 if (dma_mapping_error(ddev, mr->desc_map)) { in mlx5_alloc_priv_descs()
1941 kfree(mr->descs_alloc); in mlx5_alloc_priv_descs()
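Lines 1910-1941 show how the private descriptor buffer behind mr->descs is set up: over-allocate so the descriptors can be aligned to MLX5_UMR_ALIGN, keep the raw pointer in mr->descs_alloc for the eventual kfree(), DMA-map the aligned buffer, and record the handle in mr->desc_map. Sketch; the size/add_size computation and the error values are assumptions:

        int size = ndescs * desc_size;                  /* assumed */

        mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); /* room to realign */
        if (!mr->descs_alloc)
                return -ENOMEM;

        mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

        mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, mr->desc_map)) {
                kfree(mr->descs_alloc);                 /* free the unaligned allocation */
                return -ENOMEM;
        }
        return 0;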
1947 mlx5_free_priv_descs(struct mlx5_ib_mr *mr) in mlx5_free_priv_descs() argument
1949 if (!mr->umem && !mr->data_direct && in mlx5_free_priv_descs()
1950 mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) { in mlx5_free_priv_descs()
1951 struct ib_device *device = mr->ibmr.device; in mlx5_free_priv_descs()
1952 int size = mr->max_descs * mr->desc_size; in mlx5_free_priv_descs()
1955 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, in mlx5_free_priv_descs()
1957 kfree(mr->descs_alloc); in mlx5_free_priv_descs()
1958 mr->descs = NULL; in mlx5_free_priv_descs()
1963 struct mlx5_ib_mr *mr) in cache_ent_find_and_store() argument
1969 if (mr->mmkey.cache_ent) { in cache_ent_find_and_store()
1970 spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); in cache_ent_find_and_store()
1971 mr->mmkey.cache_ent->in_use--; in cache_ent_find_and_store()
1976 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); in cache_ent_find_and_store()
1978 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { in cache_ent_find_and_store()
1983 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1984 spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); in cache_ent_find_and_store()
1990 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); in cache_ent_find_and_store()
1995 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1996 spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); in cache_ent_find_and_store()
1999 ret = push_mkey_locked(mr->mmkey.cache_ent, mr->mmkey.key); in cache_ent_find_and_store()
2000 spin_unlock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); in cache_ent_find_and_store()
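Lines 1963-2000 show how a deregistered mkey gets parked back in the cache: if the MR was originally checked out of a cache entry, that entry's in_use count is dropped; the rb-tree is then searched for an entry matching mr->mmkey.rb_key (an exact ndescs match is required), a new entry is created when none fits, and the key is pushed onto the chosen entry's queue under mkeys_queue.lock. Sketch; the lock handling between the steps and the error paths around entry creation are simplified:

        if (mr->mmkey.cache_ent) {                      /* was checked out of the cache */
                spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
                mr->mmkey.cache_ent->in_use--;
                spin_unlock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);       /* simplified */
        }

        ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
        if (ent && ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
                mr->mmkey.cache_ent = ent;              /* reuse the matching entry */
        } else {
                ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false);
                if (IS_ERR(ent))
                        return PTR_ERR(ent);            /* error handling simplified */
                mr->mmkey.cache_ent = ent;
        }

        spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
        ret = push_mkey_locked(mr->mmkey.cache_ent, mr->mmkey.key);
        spin_unlock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
        return ret;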
2004 static int mlx5_ib_revoke_data_direct_mr(struct mlx5_ib_mr *mr) in mlx5_ib_revoke_data_direct_mr() argument
2006 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in mlx5_ib_revoke_data_direct_mr()
2007 struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem); in mlx5_ib_revoke_data_direct_mr()
2011 mr->revoked = true; in mlx5_ib_revoke_data_direct_mr()
2012 err = mlx5r_umr_revoke_mr(mr); in mlx5_ib_revoke_data_direct_mr()
2022 struct mlx5_ib_mr *mr, *next; in mlx5_ib_revoke_data_direct_mrs() local
2026 list_for_each_entry_safe(mr, next, &dev->data_direct_mr_list, dd_node) { in mlx5_ib_revoke_data_direct_mrs()
2027 list_del(&mr->dd_node); in mlx5_ib_revoke_data_direct_mrs()
2028 mlx5_ib_revoke_data_direct_mr(mr); in mlx5_ib_revoke_data_direct_mrs()
2032 static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) in mlx5_revoke_mr() argument
2034 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in mlx5_revoke_mr()
2035 struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; in mlx5_revoke_mr()
2036 bool is_odp = is_odp_mr(mr); in mlx5_revoke_mr()
2037 bool is_odp_dma_buf = is_dmabuf_mr(mr) && in mlx5_revoke_mr()
2038 !to_ib_umem_dmabuf(mr->umem)->pinned; in mlx5_revoke_mr()
2042 mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex); in mlx5_revoke_mr()
2045 dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL); in mlx5_revoke_mr()
2047 if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) { in mlx5_revoke_mr()
2048 ent = mr->mmkey.cache_ent; in mlx5_revoke_mr()
2063 mr->mmkey.cache_ent = NULL; in mlx5_revoke_mr()
2066 ret = destroy_mkey(dev, mr); in mlx5_revoke_mr()
2070 to_ib_umem_odp(mr->umem)->private = NULL; in mlx5_revoke_mr()
2071 mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex); in mlx5_revoke_mr()
2076 to_ib_umem_dmabuf(mr->umem)->private = NULL; in mlx5_revoke_mr()
2077 dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv); in mlx5_revoke_mr()
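Lines 2032-2077 are the revoke step of deregistration: take the lock that serialises against page faults or dma-buf moves (the ODP umem_mutex or the dma-buf reservation object), try to recycle the mkey (UMR revoke followed by cache_ent_find_and_store()), otherwise detach it from any cache entry and destroy it, and finally clear the umem's back-pointer and drop the lock. Sketch; error handling and the per-branch details are simplified:

        bool is_odp = is_odp_mr(mr);
        bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
                              !to_ib_umem_dmabuf(mr->umem)->pinned;

        if (is_odp)
                mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
        if (is_odp_dma_buf)
                dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL);

        if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) &&
            !cache_ent_find_and_store(dev, mr)) {
                /* mkey parked in the cache (mr->mmkey.cache_ent); nothing to destroy */
        } else {
                mr->mmkey.cache_ent = NULL;             /* dereg must free the descriptors */
                ret = destroy_mkey(dev, mr);
        }

        if (is_odp) {
                to_ib_umem_odp(mr->umem)->private = NULL;       /* fault path must not find us */
                mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
        }
        if (is_odp_dma_buf) {
                to_ib_umem_dmabuf(mr->umem)->private = NULL;
                dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
        }
        return ret;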
2085 struct mlx5_ib_mr *mr = to_mmr(ibmr); in __mlx5_ib_dereg_mr() local
2095 refcount_read(&mr->mmkey.usecount) != 0 && in __mlx5_ib_dereg_mr()
2096 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) in __mlx5_ib_dereg_mr()
2097 mlx5r_deref_wait_odp_mkey(&mr->mmkey); in __mlx5_ib_dereg_mr()
2100 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in __mlx5_ib_dereg_mr()
2101 mr->sig, NULL, GFP_KERNEL); in __mlx5_ib_dereg_mr()
2103 if (mr->mtt_mr) { in __mlx5_ib_dereg_mr()
2104 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in __mlx5_ib_dereg_mr()
2107 mr->mtt_mr = NULL; in __mlx5_ib_dereg_mr()
2109 if (mr->klm_mr) { in __mlx5_ib_dereg_mr()
2110 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in __mlx5_ib_dereg_mr()
2113 mr->klm_mr = NULL; in __mlx5_ib_dereg_mr()
2117 mr->sig->psv_memory.psv_idx)) in __mlx5_ib_dereg_mr()
2119 mr->sig->psv_memory.psv_idx); in __mlx5_ib_dereg_mr()
2120 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in __mlx5_ib_dereg_mr()
2122 mr->sig->psv_wire.psv_idx); in __mlx5_ib_dereg_mr()
2123 kfree(mr->sig); in __mlx5_ib_dereg_mr()
2124 mr->sig = NULL; in __mlx5_ib_dereg_mr()
2128 rc = mlx5_revoke_mr(mr); in __mlx5_ib_dereg_mr()
2132 if (mr->umem) { in __mlx5_ib_dereg_mr()
2133 bool is_odp = is_odp_mr(mr); in __mlx5_ib_dereg_mr()
2136 atomic_sub(ib_umem_num_pages(mr->umem), in __mlx5_ib_dereg_mr()
2138 ib_umem_release(mr->umem); in __mlx5_ib_dereg_mr()
2140 mlx5_ib_free_odp_mr(mr); in __mlx5_ib_dereg_mr()
2143 if (!mr->mmkey.cache_ent) in __mlx5_ib_dereg_mr()
2144 mlx5_free_priv_descs(mr); in __mlx5_ib_dereg_mr()
2146 kfree(mr); in __mlx5_ib_dereg_mr()
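Lines 2085-2146 give the teardown order of __mlx5_ib_dereg_mr(): wait out any ODP users of the mkey, drop signature-MR state (the dev->sig_mrs entry, the child mtt/klm PI MRs, both PSVs), revoke or recycle the mkey, release the umem (ODP MRs go through mlx5_ib_free_odp_mr() instead of the reg_pages accounting), free the private descriptors only when the mkey is not being kept in the cache, and finally free the struct. Condensed sketch; the guards on the ODP and signature branches are assumptions from context:

        /* 1. make sure no page-fault handler still holds the mkey */
        if (mkey_is_published_for_odp)                  /* placeholder for the real check */
                mlx5r_deref_wait_odp_mkey(&mr->mmkey);

        /* 2. signature MRs: xa_cmpxchg() out of dev->sig_mrs, dereg mr->mtt_mr and
         *    mr->klm_mr, destroy both PSVs, then kfree(mr->sig) */

        /* 3. give the mkey back to the cache or destroy it */
        rc = mlx5_revoke_mr(mr);
        if (rc)
                return rc;

        /* 4. drop the memory pin */
        if (mr->umem) {
                bool is_odp = is_odp_mr(mr);

                if (!is_odp)                            /* guard assumed */
                        atomic_sub(ib_umem_num_pages(mr->umem),
                                   &dev->mdev->priv.reg_pages);
                ib_umem_release(mr->umem);
                if (is_odp)                             /* guard assumed */
                        mlx5_ib_free_odp_mr(mr);
        }

        /* 5. descriptors survive only if the mkey went back into the cache */
        if (!mr->mmkey.cache_ent)
                mlx5_free_priv_descs(mr);

        kfree(mr);
        return 0;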
2151 struct mlx5_ib_mr *mr) in dereg_crossing_data_direct_mr() argument
2153 struct mlx5_ib_mr *dd_crossed_mr = mr->dd_crossed_mr; in dereg_crossing_data_direct_mr()
2156 ret = __mlx5_ib_dereg_mr(&mr->ibmr); in dereg_crossing_data_direct_mr()
2171 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_dereg_mr() local
2174 if (mr->data_direct) in mlx5_ib_dereg_mr()
2175 return dereg_crossing_data_direct_mr(dev, mr); in mlx5_ib_dereg_mr()
2201 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in _mlx5_alloc_mkey_descs() argument
2208 mr->access_mode = access_mode; in _mlx5_alloc_mkey_descs()
2209 mr->desc_size = desc_size; in _mlx5_alloc_mkey_descs()
2210 mr->max_descs = ndescs; in _mlx5_alloc_mkey_descs()
2212 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); in _mlx5_alloc_mkey_descs()
2218 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in _mlx5_alloc_mkey_descs()
2222 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_alloc_mkey_descs()
2223 mr->ibmr.lkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
2224 mr->ibmr.rkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
2229 mlx5_free_priv_descs(mr); in _mlx5_alloc_mkey_descs()
2240 struct mlx5_ib_mr *mr; in mlx5_ib_alloc_pi_mr() local
2244 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_alloc_pi_mr()
2245 if (!mr) in mlx5_ib_alloc_pi_mr()
2248 mr->ibmr.pd = pd; in mlx5_ib_alloc_pi_mr()
2249 mr->ibmr.device = pd->device; in mlx5_ib_alloc_pi_mr()
2260 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift, in mlx5_ib_alloc_pi_mr()
2265 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
2268 return mr; in mlx5_ib_alloc_pi_mr()
2273 kfree(mr); in mlx5_ib_alloc_pi_mr()
2277 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_mem_reg_descs() argument
2280 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt), in mlx5_alloc_mem_reg_descs()
2285 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_sg_gaps_descs() argument
2288 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm), in mlx5_alloc_sg_gaps_descs()
2292 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_integrity_descs() argument
2301 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); in mlx5_alloc_integrity_descs()
2302 if (!mr->sig) in mlx5_alloc_integrity_descs()
2310 mr->sig->psv_memory.psv_idx = psv_index[0]; in mlx5_alloc_integrity_descs()
2311 mr->sig->psv_wire.psv_idx = psv_index[1]; in mlx5_alloc_integrity_descs()
2313 mr->sig->sig_status_checked = true; in mlx5_alloc_integrity_descs()
2314 mr->sig->sig_err_exists = false; in mlx5_alloc_integrity_descs()
2316 ++mr->sig->sigerr_count; in mlx5_alloc_integrity_descs()
2317 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2320 if (IS_ERR(mr->klm_mr)) { in mlx5_alloc_integrity_descs()
2321 err = PTR_ERR(mr->klm_mr); in mlx5_alloc_integrity_descs()
2324 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2327 if (IS_ERR(mr->mtt_mr)) { in mlx5_alloc_integrity_descs()
2328 err = PTR_ERR(mr->mtt_mr); in mlx5_alloc_integrity_descs()
2337 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0, in mlx5_alloc_integrity_descs()
2342 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_alloc_integrity_descs()
2343 mr->sig, GFP_KERNEL)); in mlx5_alloc_integrity_descs()
2349 destroy_mkey(dev, mr); in mlx5_alloc_integrity_descs()
2350 mlx5_free_priv_descs(mr); in mlx5_alloc_integrity_descs()
2352 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2353 mr->mtt_mr = NULL; in mlx5_alloc_integrity_descs()
2355 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2356 mr->klm_mr = NULL; in mlx5_alloc_integrity_descs()
2358 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) in mlx5_alloc_integrity_descs()
2360 mr->sig->psv_memory.psv_idx); in mlx5_alloc_integrity_descs()
2361 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_alloc_integrity_descs()
2363 mr->sig->psv_wire.psv_idx); in mlx5_alloc_integrity_descs()
2365 kfree(mr->sig); in mlx5_alloc_integrity_descs()
2377 struct mlx5_ib_mr *mr; in __mlx5_ib_alloc_mr() local
2381 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in __mlx5_ib_alloc_mr()
2382 if (!mr) in __mlx5_ib_alloc_mr()
2391 mr->ibmr.device = pd->device; in __mlx5_ib_alloc_mr()
2392 mr->umem = NULL; in __mlx5_ib_alloc_mr()
2396 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
2399 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
2402 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg, in __mlx5_ib_alloc_mr()
2415 return &mr->ibmr; in __mlx5_ib_alloc_mr()
2420 kfree(mr); in __mlx5_ib_alloc_mr()
2576 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_pa_mr_sg_pi() local
2580 mr->meta_length = 0; in mlx5_ib_map_pa_mr_sg_pi()
2583 mr->mmkey.ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2586 mr->data_length = sg_dma_len(data_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2587 mr->data_iova = sg_dma_address(data_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2590 mr->meta_ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2595 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2596 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2598 ibmr->length = mr->data_length + mr->meta_length; in mlx5_ib_map_pa_mr_sg_pi()
2605 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, in mlx5_ib_sg_to_klms() argument
2614 struct mlx5_klm *klms = mr->descs; in mlx5_ib_sg_to_klms()
2616 u32 lkey = mr->ibmr.pd->local_dma_lkey; in mlx5_ib_sg_to_klms()
2619 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; in mlx5_ib_sg_to_klms()
2620 mr->ibmr.length = 0; in mlx5_ib_sg_to_klms()
2623 if (unlikely(i >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2628 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2636 mr->mmkey.ndescs = i; in mlx5_ib_sg_to_klms()
2637 mr->data_length = mr->ibmr.length; in mlx5_ib_sg_to_klms()
2643 if (unlikely(i + j >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2650 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2657 mr->meta_ndescs = j; in mlx5_ib_sg_to_klms()
2658 mr->meta_length = mr->ibmr.length - mr->data_length; in mlx5_ib_sg_to_klms()
2666 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page() local
2669 if (unlikely(mr->mmkey.ndescs == mr->max_descs)) in mlx5_set_page()
2672 descs = mr->descs; in mlx5_set_page()
2673 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); in mlx5_set_page()
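Lines 2666-2673 are the ib_sg_to_pages() callback that writes one MTT entry per page address; almost the whole function is visible above. Reassembled (the -ENOMEM return on overflow is an assumption):

static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        __be64 *descs;

        if (unlikely(mr->mmkey.ndescs == mr->max_descs))
                return -ENOMEM;                         /* descriptor array is full */

        descs = mr->descs;
        descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
        return 0;
}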
2680 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page_pi() local
2683 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) in mlx5_set_page_pi()
2686 descs = mr->descs; in mlx5_set_page_pi()
2687 descs[mr->mmkey.ndescs + mr->meta_ndescs++] = in mlx5_set_page_pi()
2699 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mtt_mr_sg_pi() local
2700 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; in mlx5_ib_map_mtt_mr_sg_pi()
2764 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_klm_mr_sg_pi() local
2765 struct mlx5_ib_mr *pi_mr = mr->klm_mr; in mlx5_ib_map_klm_mr_sg_pi()
2797 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg_pi() local
2803 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2804 mr->data_length = 0; in mlx5_ib_map_mr_sg_pi()
2805 mr->data_iova = 0; in mlx5_ib_map_mr_sg_pi()
2806 mr->meta_ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2807 mr->pi_iova = 0; in mlx5_ib_map_mr_sg_pi()
2827 pi_mr = mr->mtt_mr; in mlx5_ib_map_mr_sg_pi()
2834 pi_mr = mr->klm_mr; in mlx5_ib_map_mr_sg_pi()
2844 mr->pi_mr = pi_mr; in mlx5_ib_map_mr_sg_pi()
2848 ibmr->sig_attrs->meta_length = mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2856 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg() local
2859 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg()
2861 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2862 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
2865 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in mlx5_ib_map_mr_sg()
2866 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, in mlx5_ib_map_mr_sg()
2872 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2873 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
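Lines 2856-2873 show the fast-registration mapping entry point mlx5_ib_map_mr_sg(): reset the descriptor count, sync the descriptor buffer for CPU access, fill it either with KLMs (MLX5_MKC_ACCESS_MODE_KLMS) or with MTT pages via the mlx5_set_page() callback, and sync it back for the device. Sketch; the DMA direction, the last mlx5_ib_sg_to_klms() argument and the ib_sg_to_pages() branch are cut off in the listing and filled in by assumption:

        int n;

        mr->mmkey.ndescs = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
                                   mr->desc_size * mr->max_descs,
                                   DMA_TO_DEVICE);              /* direction assumed */

        if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
                n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
                                       NULL /* completed by assumption */);
        else
                n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
                                   mlx5_set_page);              /* assumed from context */

        ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
                                      mr->desc_size * mr->max_descs,
                                      DMA_TO_DEVICE);
        return n;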