xref: /aosp_15_r20/external/ComputeLibrary/src/runtime/CL/CLTensor.cpp (revision c217d954acce2dbc11938adb493fc0abd69584f3)
/*
 * Copyright (c) 2016-2019 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/CLTensor.h"

#include "arm_compute/runtime/CL/CLRuntimeContext.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

namespace arm_compute
{
CLTensor::CLTensor(IRuntimeContext *ctx)
    : _allocator(this, static_cast<CLRuntimeContext *>(ctx)), _ctx(static_cast<CLRuntimeContext *>(ctx))
{
}

CLRuntimeContext *CLTensor::context()
{
    return _ctx;
}

TensorInfo *CLTensor::info() const
{
    return &_allocator.info();
}

TensorInfo *CLTensor::info()
{
    return &_allocator.info();
}

const cl::Buffer &CLTensor::cl_buffer() const
{
    return _allocator.cl_data();
}

CLQuantization CLTensor::quantization() const
{
    return _allocator.quantization();
}

CLTensorAllocator *CLTensor::allocator()
{
    return &_allocator;
}

// Map/unmap use the command queue of the runtime context supplied at construction
// time; when no context was provided they fall back to the global CLScheduler queue.
void CLTensor::map(bool blocking)
{
    ICLTensor::map(_ctx == nullptr ? CLScheduler::get().queue() : _ctx->gpu_scheduler()->queue(), blocking);
}

void CLTensor::unmap()
{
    ICLTensor::unmap(_ctx == nullptr ? CLScheduler::get().queue() : _ctx->gpu_scheduler()->queue());
}

// Called by ICLTensor::map()/unmap() with the queue selected above; the actual
// mapping of the OpenCL buffer is handled by the allocator.
uint8_t *CLTensor::do_map(cl::CommandQueue &q, bool blocking)
{
    return _allocator.map(q, blocking);
}

void CLTensor::do_unmap(cl::CommandQueue &q)
{
    _allocator.unmap(q, buffer());
}

// Memory-group bookkeeping (lifetime management of the backing memory) is
// delegated to the allocator.
void CLTensor::associate_memory_group(arm_compute::IMemoryGroup *memory_group)
{
    _allocator.set_associated_memory_group(memory_group);
}
} // namespace arm_compute
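// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original CLTensor.cpp): it shows
// how the CLTensor interface implemented above is typically driven from client
// code, and would live in its own translation unit. The 16x16 F32 shape and
// the zero-fill are assumptions made for the example only.
// ---------------------------------------------------------------------------
#include <algorithm>

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

int main()
{
    using namespace arm_compute;

    // Create the default OpenCL context and queue; CLTensor::map()/unmap()
    // fall back to this queue when no CLRuntimeContext is passed to the tensor.
    CLScheduler::get().default_init();

    CLTensor tensor; // constructed without a runtime context (ctx == nullptr)
    tensor.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    tensor.allocator()->allocate(); // creates the cl::Buffer returned by cl_buffer()

    tensor.map(true); // blocking map: buffer() now points at host-visible memory
    std::fill_n(reinterpret_cast<float *>(tensor.buffer()), 16 * 16, 0.f);
    tensor.unmap(); // release the mapping before enqueuing further GPU work

    return 0;
}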