Lines Matching refs:cb

17 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)  in cb_map_mem()  argument
30 if (cb->is_mmu_mapped) in cb_map_mem()
33 cb->roundup_size = roundup(cb->size, page_size); in cb_map_mem()
35 cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size); in cb_map_mem()
36 if (!cb->virtual_addr) { in cb_map_mem()
43 rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size); in cb_map_mem()
45 dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr); in cb_map_mem()
55 cb->is_mmu_mapped = true; in cb_map_mem()
60 hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size); in cb_map_mem()
63 gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size); in cb_map_mem()
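
Taken together, the cb_map_mem() matches describe the path that exposes a CB through the device MMU: round the CB size up to the MMU page size, reserve a device virtual address from the per-context cb_va_pool, map it contiguously onto the CB's bus address, and undo the reservation on failure. A minimal reconstruction of that flow, assuming the driver's habanalabs.h definitions; the page-size source, the error label and any locking around the MMU calls are not visible in the matched lines and are assumptions:

static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	u32 page_size = hdev->asic_prop.pmmu.page_size;	/* assumed source of the MMU page size */
	int rc;

	/* Already mapped: nothing to do. */
	if (cb->is_mmu_mapped)
		return 0;

	/* MMU mappings are done in whole pages. */
	cb->roundup_size = roundup(cb->size, page_size);

	/* Reserve a device virtual address range from the per-context pool. */
	cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
	if (!cb->virtual_addr) {
		dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
		return -ENOMEM;
	}

	/* Back the reserved range with the CB's contiguous DMA buffer. */
	rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
	if (rc) {
		dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
		goto err_va_pool_free;
	}

	/*
	 * The matches also show hl_mmu_unmap_contiguous() inside cb_map_mem(),
	 * which implies a later step (likely an MMU cache invalidation) can
	 * still fail after a successful map; that step is elided here.
	 */
	cb->is_mmu_mapped = true;

	return 0;

err_va_pool_free:
	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);

	return rc;
}
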
68 static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb) in cb_unmap_mem() argument
73 hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size); in cb_unmap_mem()
77 gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size); in cb_unmap_mem()
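
cb_unmap_mem() is the mirror image: drop the contiguous MMU mapping and return the device virtual address range to the pool. Sketched directly from the two matched lines; any surrounding locking is omitted:

static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	/* Remove the device MMU translation for the CB... */
	hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);

	/* ...and give the device virtual address range back to the per-context pool. */
	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
}
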
80 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) in cb_fini() argument
82 if (cb->is_internal) in cb_fini()
84 (uintptr_t)cb->kernel_address, cb->size); in cb_fini()
86 hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address); in cb_fini()
88 kfree(cb); in cb_fini()
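
cb_fini() frees the CB's backing storage according to how it was obtained: internal CBs go back to a device-internal gen_pool, other CBs are coherent DMA allocations, and the descriptor itself is then kfree()d. A sketch; the internal pool field name is an assumption, since only the argument list of that gen_pool_free() call appears in the matches:

static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		/* "internal_cb_pool" is an assumed field name for the internal gen_pool. */
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t) cb->kernel_address, cb->size);
	else
		hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address,
				cb->bus_address);

	kfree(cb);
}
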
91 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) in cb_do_release() argument
93 if (cb->is_pool) { in cb_do_release()
94 atomic_set(&cb->is_handle_destroyed, 0); in cb_do_release()
96 list_add(&cb->pool_list, &hdev->cb_pool); in cb_do_release()
99 cb_fini(hdev, cb); in cb_do_release()
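
cb_do_release() either recycles a pool-backed CB onto hdev->cb_pool, clearing its is_handle_destroyed flag so the next owner can destroy it again, or finalizes it with cb_fini(). Sketch; the lock guarding the pool list is not in the matches and is assumed:

static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		/* Allow the recycled CB's next handle to be destroyed. */
		atomic_set(&cb->is_handle_destroyed, 0);

		/* Assumed: list insertion is done under a pool lock on hdev. */
		list_add(&cb->pool_list, &hdev->cb_pool);
	} else {
		cb_fini(hdev, cb);
	}
}
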
106 struct hl_cb *cb = NULL; in hl_cb_alloc() local
119 cb = kzalloc(sizeof(*cb), GFP_ATOMIC); in hl_cb_alloc()
121 if (!cb) in hl_cb_alloc()
122 cb = kzalloc(sizeof(*cb), GFP_KERNEL); in hl_cb_alloc()
124 if (!cb) in hl_cb_alloc()
130 kfree(cb); in hl_cb_alloc()
135 cb->is_internal = true; in hl_cb_alloc()
136 cb->bus_address = hdev->internal_cb_va_base + cb_offset; in hl_cb_alloc()
138 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC); in hl_cb_alloc()
140 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL); in hl_cb_alloc()
142 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, in hl_cb_alloc()
150 kfree(cb); in hl_cb_alloc()
154 cb->kernel_address = p; in hl_cb_alloc()
155 cb->size = cb_size; in hl_cb_alloc()
157 return cb; in hl_cb_alloc()
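
hl_cb_alloc() builds the hl_cb descriptor and its backing memory. The matches show a GFP_ATOMIC attempt that falls back to GFP_KERNEL, an internal-CB branch that derives bus_address from internal_cb_va_base plus an offset, and ASIC coherent DMA allocations for the rest. A reconstruction under assumptions: the kernel-context condition, the internal gen_pool allocation and the user-allocation GFP flags do not appear in the matched lines.

static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb = NULL;
	u32 cb_offset;
	void *p;

	/*
	 * Assumed: the non-sleeping attempt is made for the kernel context,
	 * which may allocate on a latency-sensitive submission path.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

	if (!cb)
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		/* Assumed: internal CBs are carved out of a pre-mapped internal pool. */
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
		if (!p)
			p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
	} else {
		/* User CBs: the exact GFP flags are not visible in the matches. */
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}
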
170 struct hl_cb *cb = buf->private; in hl_cb_mmap_mem_release() local
172 hl_debugfs_remove_cb(cb); in hl_cb_mmap_mem_release()
174 if (cb->is_mmu_mapped) in hl_cb_mmap_mem_release()
175 cb_unmap_mem(cb->ctx, cb); in hl_cb_mmap_mem_release()
177 hl_ctx_put(cb->ctx); in hl_cb_mmap_mem_release()
179 cb_do_release(cb->hdev, cb); in hl_cb_mmap_mem_release()
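
hl_cb_mmap_mem_release() is the buffer-release callback for the memory manager: it unregisters the CB from debugfs, tears down the MMU mapping if one exists, drops the context reference taken at allocation time, and then releases the CB itself. The matches cover essentially the whole body; only the callback signature is inferred:

static void hl_cb_mmap_mem_release(struct hl_mmap_mem_buf *buf)
{
	struct hl_cb *cb = buf->private;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(cb->hdev, cb);
}
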
185 struct hl_cb *cb; in hl_cb_mmap_mem_alloc() local
199 cb = list_first_entry(&cb_args->hdev->cb_pool, in hl_cb_mmap_mem_alloc()
200 typeof(*cb), pool_list); in hl_cb_mmap_mem_alloc()
201 list_del(&cb->pool_list); in hl_cb_mmap_mem_alloc()
212 cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb); in hl_cb_mmap_mem_alloc()
213 if (!cb) in hl_cb_mmap_mem_alloc()
217 cb->hdev = cb_args->hdev; in hl_cb_mmap_mem_alloc()
218 cb->ctx = cb_args->ctx; in hl_cb_mmap_mem_alloc()
219 cb->buf = buf; in hl_cb_mmap_mem_alloc()
220 cb->buf->mappable_size = cb->size; in hl_cb_mmap_mem_alloc()
221 cb->buf->private = cb; in hl_cb_mmap_mem_alloc()
223 hl_ctx_get(cb->ctx); in hl_cb_mmap_mem_alloc()
233 rc = cb_map_mem(cb_args->ctx, cb); in hl_cb_mmap_mem_alloc()
238 hl_debugfs_add_cb(cb); in hl_cb_mmap_mem_alloc()
243 hl_ctx_put(cb->ctx); in hl_cb_mmap_mem_alloc()
244 cb_do_release(cb_args->hdev, cb); in hl_cb_mmap_mem_alloc()
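
hl_cb_mmap_mem_alloc() is the matching allocation callback. The matches show two sources for the CB: reuse of a pre-allocated CB taken off hdev->cb_pool, or a fresh hl_cb_alloc(); the CB is then wired to the memory-manager buffer, a context reference is taken, the CB is optionally mapped into the device MMU, and it is registered in debugfs, with the reference and CB given back on error. A sketch; the callback signature, the args type, the pool-reuse condition and its locking, and the "map requested" flag are assumptions:

static int hl_cb_mmap_mem_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
	struct hl_cb_mmap_mem_alloc_args *cb_args = args;	/* assumed args type */
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = cb_args->ctx->asid;	/* assumed: ctx_id is the context's ASID */

	/*
	 * Assumed condition: small kernel-context CBs may be served from the
	 * pre-allocated pool; the list manipulation is what the matches show,
	 * the surrounding lock is not visible.
	 */
	if (!cb_args->internal_cb && !list_empty(&cb_args->hdev->cb_pool)) {
		cb = list_first_entry(&cb_args->hdev->cb_pool,
				typeof(*cb), pool_list);
		list_del(&cb->pool_list);
		alloc_new_cb = false;
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id,
					cb_args->internal_cb);
		if (!cb)
			return -ENOMEM;
	}

	/* Tie the CB and the memory-manager buffer together. */
	cb->hdev = cb_args->hdev;
	cb->ctx = cb_args->ctx;
	cb->buf = buf;
	cb->buf->mappable_size = cb->size;
	cb->buf->private = cb;

	/* Hold the context for as long as the CB lives. */
	hl_ctx_get(cb->ctx);

	if (cb_args->map_cb) {	/* assumed flag name for "map the CB to the device MMU" */
		rc = cb_map_mem(cb_args->ctx, cb);
		if (rc)
			goto release_cb;
	}

	hl_debugfs_add_cb(cb);

	return 0;

release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(cb_args->hdev, cb);

	return rc;
}
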
252 struct hl_cb *cb = buf->private; in hl_cb_mmap() local
254 return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address, in hl_cb_mmap()
255 cb->bus_address, cb->size); in hl_cb_mmap()
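
Mapping a CB into user space is delegated entirely to the ASIC-specific mmap callback, passing the CB's kernel address, bus address and size. Sketch; the callback's parameter list beyond what the match shows is inferred:

static int hl_cb_mmap(struct hl_mmap_mem_buf *buf,
				struct vm_area_struct *vma, void *args)
{
	struct hl_cb *cb = buf->private;

	return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
}
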
305 struct hl_cb *cb; in hl_cb_destroy() local
308 cb = hl_cb_get(mmg, cb_handle); in hl_cb_destroy()
309 if (!cb) { in hl_cb_destroy()
316 rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1); in hl_cb_destroy()
317 hl_cb_put(cb); in hl_cb_destroy()
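
hl_cb_destroy() looks the CB up by handle, uses an atomic compare-and-exchange on is_handle_destroyed to guarantee a handle is only destroyed once, and drops the reference taken by the lookup before releasing the handle itself. A sketch; the mmg type, the error details and the final handle-release call are not in the matched lines and are assumptions:

int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
{
	struct hl_cb *cb;
	int rc;

	cb = hl_cb_get(mmg, cb_handle);
	if (!cb)
		return -EINVAL;

	/* The 0 -> 1 transition wins; any later caller sees 1 and bails out. */
	rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
	hl_cb_put(cb);
	if (rc)
		return -EINVAL;	/* handle was already destroyed */

	/*
	 * The actual release of the handle's reference in the memory manager
	 * follows here; that call is not part of the matched lines.
	 */
	return 0;
}
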
337 struct hl_cb *cb; in hl_cb_info() local
340 cb = hl_cb_get(mmg, handle); in hl_cb_info()
341 if (!cb) { in hl_cb_info()
348 if (cb->is_mmu_mapped) { in hl_cb_info()
349 *device_va = cb->virtual_addr; in hl_cb_info()
356 *usage_cnt = atomic_read(&cb->cs_cnt); in hl_cb_info()
360 hl_cb_put(cb); in hl_cb_info()
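
hl_cb_info() also works on a handle lookup: depending on the request it either reports the device virtual address (only valid while the CB is MMU-mapped) or the current usage count taken from cs_cnt, and always drops the lookup reference before returning. Sketch; the flag name and signature details are assumptions:

static int hl_cb_info(struct hl_mem_mgr *mmg, u64 handle, u32 flags,
			u32 *usage_cnt, u64 *device_va)
{
	struct hl_cb *cb;
	int rc = 0;

	cb = hl_cb_get(mmg, handle);
	if (!cb)
		return -EINVAL;

	if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {	/* assumed flag name */
		if (cb->is_mmu_mapped)
			*device_va = cb->virtual_addr;
		else
			rc = -EINVAL;	/* CB is not mapped to the device MMU */
	} else {
		/* Report how many command submissions currently use this CB. */
		*usage_cnt = atomic_read(&cb->cs_cnt);
	}

	hl_cb_put(cb);
	return rc;
}
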
439 void hl_cb_put(struct hl_cb *cb) in hl_cb_put() argument
441 hl_mmap_mem_buf_put(cb->buf); in hl_cb_put()
448 struct hl_cb *cb; in hl_cb_kernel_create() local
459 cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle); in hl_cb_kernel_create()
461 if (!cb) { in hl_cb_kernel_create()
467 return cb; in hl_cb_kernel_create()
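
hl_cb_kernel_create() is the kernel-driver convenience wrapper: it creates a CB through the driver's kernel memory manager and immediately resolves the returned handle back to an hl_cb pointer for the caller. A sketch; the creation call itself does not appear in the matched lines, so its name and argument list are assumptions:

struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	/* Assumed creation path: allocate the CB under the kernel context. */
	rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc)
		return NULL;

	/* Resolve the fresh handle; this should not fail for a kernel CB. */
	cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
	if (!cb) {
		hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);
		return NULL;
	}

	return cb;
}
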
477 struct hl_cb *cb; in hl_cb_pool_init() local
484 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size, in hl_cb_pool_init()
486 if (cb) { in hl_cb_pool_init()
487 cb->is_pool = true; in hl_cb_pool_init()
488 list_add(&cb->pool_list, &hdev->cb_pool); in hl_cb_pool_init()
500 struct hl_cb *cb, *tmp; in hl_cb_pool_fini() local
502 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) { in hl_cb_pool_fini()
503 list_del(&cb->pool_list); in hl_cb_pool_fini()
504 cb_fini(hdev, cb); in hl_cb_pool_fini()
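
The pool helpers round the picture out: hl_cb_pool_init() pre-allocates a number of CBs of cb_pool_cb_size, marks them as pool CBs and queues them on hdev->cb_pool, while hl_cb_pool_fini() walks the list with the _safe iterator and finalizes each one. A sketch; the pool count field, the ASID passed to hl_cb_alloc(), the list/lock initialization and the error handling are assumptions:

int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	/* Assumed: cb_pool_cb_cnt gives the number of pre-allocated pool CBs. */
	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	/* Safe iteration: each CB is unlinked before cb_fini() frees it. */
	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}
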