Lines matching refs:mr (references to the memory-region pointer "mr" in the mlx4 InfiniBand driver's MR code; each entry is <line number> <source line> in <enclosing function>)
60 struct mlx4_ib_mr *mr; in mlx4_ib_get_dma_mr() local
63 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_get_dma_mr()
64 if (!mr) in mlx4_ib_get_dma_mr()
68 ~0ull, convert_access(acc), 0, 0, &mr->mmr); in mlx4_ib_get_dma_mr()
72 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
76 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_get_dma_mr()
77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
79 return &mr->ibmr; in mlx4_ib_get_dma_mr()
82 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
85 kfree(mr); in mlx4_ib_get_dma_mr()
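Read together, these matches cover nearly the whole DMA-MR path: allocate the wrapper, allocate and enable the hardware MR, publish the keys. A sketch of the full function, reconstructed from the matched lines; the first half of the mlx4_mr_alloc() call and the error labels are not in the listing, so those parts are filled in as assumptions:

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* A DMA MR covers the whole address space: iova 0, length ~0ull. */
	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}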
145 struct mlx4_ib_mr *mr; in mlx4_ib_reg_user_mr() local
150 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_reg_user_mr()
151 if (!mr) in mlx4_ib_reg_user_mr()
154 mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags); in mlx4_ib_reg_user_mr()
155 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
156 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
160 shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n); in mlx4_ib_reg_user_mr()
167 convert_access(access_flags), n, shift, &mr->mmr); in mlx4_ib_reg_user_mr()
171 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); in mlx4_ib_reg_user_mr()
175 err = mlx4_mr_enable(dev->dev, &mr->mmr); in mlx4_ib_reg_user_mr()
179 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_reg_user_mr()
180 mr->ibmr.page_size = 1U << shift; in mlx4_ib_reg_user_mr()
182 return &mr->ibmr; in mlx4_ib_reg_user_mr()
185 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_reg_user_mr()
188 ib_umem_release(mr->umem); in mlx4_ib_reg_user_mr()
191 kfree(mr); in mlx4_ib_reg_user_mr()
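User-memory registration follows the same alloc/enable pattern, with the user pages pinned first and the MTT written before the MR is enabled. A sketch of the surrounding function; the mlx4_mr_alloc() prefix, the error labels, and the exact parameter list are assumptions not visible in the listing:

struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Pin the user pages and take a reference on the memory. */
	mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	/* Pick the largest MTT page size the umem layout allows. */
	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.page_size = 1U << shift;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}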
196 struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, in mlx4_ib_rereg_user_mr() argument
201 struct mlx4_ib_dev *dev = to_mdev(mr->device); in mlx4_ib_rereg_user_mr()
202 struct mlx4_ib_mr *mmr = to_mmr(mr); in mlx4_ib_rereg_user_mr()
243 mmr->umem = mlx4_get_umem_mr(mr->device, start, length, in mlx4_ib_rereg_user_mr()
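Only fragments of the rereg path are matched here. As a rough sketch of the shape of the IB_MR_REREG_TRANS handling around the mlx4_get_umem_mr() call above, assuming the usual fetch/patch/write-back sequence on the hardware MPT entry; the PD and access-flag branches, the MTT rewrite, and the success return convention are omitted or marked as assumptions:

struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				    u64 length, u64 virt_addr,
				    int mr_access_flags, struct ib_pd *pd,
				    struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(mr->device);
	struct mlx4_ib_mr *mmr = to_mmr(mr);
	struct mlx4_mpt_entry *mpt_entry;
	struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
	int err;

	/* Fetch the hardware MPT entry so it can be patched in place. */
	err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
	if (err)
		return ERR_PTR(err);

	/* IB_MR_REREG_PD and IB_MR_REREG_ACCESS handling omitted. */

	if (flags & IB_MR_REREG_TRANS) {
		/* Drop the old translation and pin the new user range. */
		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
		ib_umem_release(mmr->umem);
		mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
					     mr_access_flags);
		if (IS_ERR(mmr->umem)) {
			err = PTR_ERR(mmr->umem);
			/* Keep dereg from releasing a stale pointer. */
			mmr->umem = NULL;
			goto release_mpt_entry;
		}
		/* Rewriting the translation (mlx4_mr_rereg_mem_write() and
		 * mlx4_ib_umem_write_mtt()) is omitted in this sketch.
		 */
	}

	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);

release_mpt_entry:
	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

	/* Assumed convention: NULL means the existing MR was updated in place. */
	return err ? ERR_PTR(err) : NULL;
}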
288 struct mlx4_ib_mr *mr, in mlx4_alloc_priv_pages() argument
298 mr->page_map_size = roundup(max_pages * sizeof(u64), in mlx4_alloc_priv_pages()
302 mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL); in mlx4_alloc_priv_pages()
303 if (!mr->pages) in mlx4_alloc_priv_pages()
306 mr->page_map = dma_map_single(device->dev.parent, mr->pages, in mlx4_alloc_priv_pages()
307 mr->page_map_size, DMA_TO_DEVICE); in mlx4_alloc_priv_pages()
309 if (dma_mapping_error(device->dev.parent, mr->page_map)) { in mlx4_alloc_priv_pages()
317 free_page((unsigned long)mr->pages); in mlx4_alloc_priv_pages()
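The private page-list allocation for fast-registration MRs is almost fully visible above: one zeroed page holds the list, and it is DMA-mapped toward the device. A sketch of the complete helper; the rounding constant and the error label are filled in as assumptions:

static int
mlx4_alloc_priv_pages(struct ib_device *device,
		      struct mlx4_ib_mr *mr,
		      int max_pages)
{
	int ret;

	/* Round the page-list size up to the DMA alignment requirement;
	 * max_pages is capped so this never exceeds one page.
	 */
	mr->page_map_size = roundup(max_pages * sizeof(u64),
				    MLX4_MR_PAGES_ALIGN);

	/* A single zeroed page keeps the list from crossing a page boundary. */
	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
	if (!mr->pages)
		return -ENOMEM;

	mr->page_map = dma_map_single(device->dev.parent, mr->pages,
				      mr->page_map_size, DMA_TO_DEVICE);

	if (dma_mapping_error(device->dev.parent, mr->page_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;

err:
	free_page((unsigned long)mr->pages);
	return ret;
}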
322 mlx4_free_priv_pages(struct mlx4_ib_mr *mr) in mlx4_free_priv_pages() argument
324 if (mr->pages) { in mlx4_free_priv_pages()
325 struct ib_device *device = mr->ibmr.device; in mlx4_free_priv_pages()
327 dma_unmap_single(device->dev.parent, mr->page_map, in mlx4_free_priv_pages()
328 mr->page_map_size, DMA_TO_DEVICE); in mlx4_free_priv_pages()
329 free_page((unsigned long)mr->pages); in mlx4_free_priv_pages()
330 mr->pages = NULL; in mlx4_free_priv_pages()
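The matching teardown is essentially all in the listing; completed with its signature and braces it reads:

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
	if (mr->pages) {
		struct ib_device *device = mr->ibmr.device;

		/* Unmap before freeing so the device can no longer DMA here. */
		dma_unmap_single(device->dev.parent, mr->page_map,
				 mr->page_map_size, DMA_TO_DEVICE);
		free_page((unsigned long)mr->pages);
		mr->pages = NULL;
	}
}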
336 struct mlx4_ib_mr *mr = to_mmr(ibmr); in mlx4_ib_dereg_mr() local
339 mlx4_free_priv_pages(mr); in mlx4_ib_dereg_mr()
341 ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); in mlx4_ib_dereg_mr()
344 if (mr->umem) in mlx4_ib_dereg_mr()
345 ib_umem_release(mr->umem); in mlx4_ib_dereg_mr()
346 kfree(mr); in mlx4_ib_dereg_mr()
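Deregistration ties the previous helpers together: free the private page list, free the hardware MR, release any pinned user memory, then the wrapper. A sketch of the full function; the return-value check after mlx4_mr_free() and the udata parameter (present only on some kernel versions) are assumptions:

int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;

	mlx4_free_priv_pages(mr);

	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}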
386 struct mlx4_ib_mr *mr; in mlx4_ib_alloc_mr() local
393 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_alloc_mr()
394 if (!mr) in mlx4_ib_alloc_mr()
398 max_num_sg, 0, &mr->mmr); in mlx4_ib_alloc_mr()
402 err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg); in mlx4_ib_alloc_mr()
406 mr->max_pages = max_num_sg; in mlx4_ib_alloc_mr()
407 err = mlx4_mr_enable(dev->dev, &mr->mmr); in mlx4_ib_alloc_mr()
411 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_alloc_mr()
412 mr->umem = NULL; in mlx4_ib_alloc_mr()
414 return &mr->ibmr; in mlx4_ib_alloc_mr()
417 mr->ibmr.device = pd->device; in mlx4_ib_alloc_mr()
418 mlx4_free_priv_pages(mr); in mlx4_ib_alloc_mr()
420 (void) mlx4_mr_free(dev->dev, &mr->mmr); in mlx4_ib_alloc_mr()
422 kfree(mr); in mlx4_ib_alloc_mr()
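The fast-registration allocator follows the same alloc/enable shape plus the private page list. A sketch of the whole function; the mr_type/max_num_sg validation, the mlx4_mr_alloc() prefix, the error labels, and the exact parameter list are assumptions not shown in the listing:

struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_num_sg, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
	if (err)
		goto err_free_mr;

	mr->max_pages = max_num_sg;
	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_free_pl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_free_pl:
	/* mlx4_free_priv_pages() reads mr->ibmr.device, so set it first. */
	mr->ibmr.device = pd->device;
	mlx4_free_priv_pages(mr);
err_free_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}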
428 struct mlx4_ib_mr *mr = to_mmr(ibmr); in mlx4_set_page() local
430 if (unlikely(mr->npages == mr->max_pages)) in mlx4_set_page()
433 mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT); in mlx4_set_page()
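mlx4_set_page() is the per-page callback handed to ib_sg_to_pages(); completed, it is just a bounds check plus a big-endian store into the DMA-mapped page list:

static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);

	if (unlikely(mr->npages == mr->max_pages))
		return -ENOMEM;

	/* MTT entries are big-endian and carry a "present" flag bit. */
	mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

	return 0;
}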
441 struct mlx4_ib_mr *mr = to_mmr(ibmr); in mlx4_ib_map_mr_sg() local
444 mr->npages = 0; in mlx4_ib_map_mr_sg()
446 ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, in mlx4_ib_map_mr_sg()
447 mr->page_map_size, DMA_TO_DEVICE); in mlx4_ib_map_mr_sg()
451 ib_dma_sync_single_for_device(ibmr->device, mr->page_map, in mlx4_ib_map_mr_sg()
452 mr->page_map_size, DMA_TO_DEVICE); in mlx4_ib_map_mr_sg()
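The two sync calls bracket the page-list fill, which is delegated to ib_sg_to_pages() with mlx4_set_page() as the callback. A sketch of the function; the ib_sg_to_pages() line in the middle is not in the listing, so it is an assumption, though it is the standard pattern for this callback pair:

int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int rc;

	mr->npages = 0;

	/* Give the CPU ownership of the page list before writing it. */
	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
				   mr->page_map_size, DMA_TO_DEVICE);

	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

	/* Hand the filled page list back to the device. */
	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
				      mr->page_map_size, DMA_TO_DEVICE);

	return rc;
}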