
Searched refs:TensorImpl (Results 1 – 25 of 177) sorted by relevance


/aosp_15_r20/external/pytorch/c10/core/
TensorImpl.cpp
30 const char* const TensorImpl::err_msg_tensor_metadata_change_not_allowed =
40 at::Tensor& TensorImpl::mutable_grad() { in mutable_grad()
46 const at::Tensor& TensorImpl::grad() const { in grad()
58 const at::Tensor& TensorImpl::_fw_grad( in _fw_grad()
67 void TensorImpl::_set_fw_grad( in _set_fw_grad()
77 TensorImpl::~TensorImpl() = default;
79 TensorImpl::TensorImpl( in TensorImpl() function in c10::TensorImpl
84 : TensorImpl( in TensorImpl()
108 TensorImpl::TensorImpl( in TensorImpl() function in c10::TensorImpl
127 TensorImpl::TensorImpl( in TensorImpl() function in c10::TensorImpl
[all …]
TensorImpl.h
157 at::TensorImpl* self_impl) = 0;
414 struct C10_API TensorImpl; variable
502 struct C10_API TensorImpl : public c10::intrusive_ptr_target { struct
503 TensorImpl() = delete;
504 ~TensorImpl() override;
516 TensorImpl( argument
522 TensorImpl(
531 TensorImpl(
538 TensorImpl( in TensorImpl() function
542 : TensorImpl( in TensorImpl()
[all …]
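
For orientation: every at::Tensor owns exactly one c10::TensorImpl, reference-counted through the c10::intrusive_ptr_target base visible above. A minimal sketch of inspecting an impl through the public ATen API (standard libtorch calls):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      // Every at::Tensor holds its TensorImpl via c10::intrusive_ptr, which is
      // why TensorImpl derives from c10::intrusive_ptr_target above.
      at::Tensor t = at::ones({2, 3});
      // Borrow the impl without touching the refcount; do not store or free it.
      c10::TensorImpl* impl = t.unsafeGetTensorImpl();
      std::cout << "dim=" << impl->dim() << " numel=" << impl->numel()
                << " contiguous=" << impl->is_contiguous() << '\n';
    }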
/aosp_15_r20/external/executorch/examples/qualcomm/qaihub_scripts/llama/runner/
io_memory.cpp
16 using executorch::aten::TensorImpl;
47 for (TensorImpl* impl : input_tensors_[shard_index]) { in get_input_tensors()
56 for (TensorImpl* impl : output_tensors_[shard_index]) { in get_output_tensors()
97 input_ids_ = std::make_unique<TensorImpl>( in prepare_io()
100 const_cast<TensorImpl::SizesType*>(input_ids->sizes().data()), in prepare_io()
102 const_cast<TensorImpl::DimOrderType*>(input_ids->dim_order().data())); in prepare_io()
106 attention_mask_ = std::make_unique<TensorImpl>( in prepare_io()
109 const_cast<TensorImpl::SizesType*>(atten_mask->sizes().data()), in prepare_io()
111 const_cast<TensorImpl::DimOrderType*>(atten_mask->dim_order().data())); in prepare_io()
115 position_ids_cos_ = std::make_unique<TensorImpl>( in prepare_io()
[all …]
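
The prepare_io() lines above rebuild each model input as a heap-owned TensorImpl over metadata borrowed from an existing tensor. A minimal sketch of that pattern, assuming the portable-mode build where executorch::aten::TensorImpl has the (type, dim, sizes, data, dim_order) constructor exercised by the tests below; the shape, buffer, and include path here are assumptions:

    #include <memory>
    #include <executorch/runtime/core/exec_aten/exec_aten.h>  // path assumed

    using executorch::aten::ScalarType;
    using executorch::aten::TensorImpl;

    // Hypothetical static shape and buffer; prepare_io() instead borrows
    // sizes().data() and dim_order().data() from the method's own tensors.
    TensorImpl::SizesType sizes[2] = {1, 8};
    TensorImpl::DimOrderType dim_order[2] = {0, 1};
    int32_t buffer[8] = {};

    std::unique_ptr<TensorImpl> input_ids = std::make_unique<TensorImpl>(
        ScalarType::Int, /*dim=*/2, sizes, buffer, dim_order);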
io_memory.h
51 std::vector<std::vector<executorch::aten::TensorImpl*>> input_tensors_;
52 std::vector<std::vector<executorch::aten::TensorImpl*>> output_tensors_;
83 std::unique_ptr<executorch::aten::TensorImpl> input_ids_;
84 std::unique_ptr<executorch::aten::TensorImpl> hidden_state_;
85 std::unique_ptr<executorch::aten::TensorImpl> attention_mask_;
86 std::unique_ptr<executorch::aten::TensorImpl> position_ids_cos_;
87 std::unique_ptr<executorch::aten::TensorImpl> position_ids_sin_;
88 std::vector<std::unique_ptr<executorch::aten::TensorImpl>> k_cache_;
89 std::vector<std::unique_ptr<executorch::aten::TensorImpl>> v_cache_;
90 std::unique_ptr<executorch::aten::TensorImpl> logits_;
[all …]
/aosp_15_r20/external/executorch/runtime/core/portable_type/test/
tensor_test.cpp
18 using executorch::runtime::etensor::TensorImpl;
30 TensorImpl::SizesType sizes[1] = {1}; in TEST_F()
34 ET_EXPECT_DEATH({ TensorImpl y(ScalarType::Undefined, 1, sizes); }, ""); in TEST_F()
38 ET_EXPECT_DEATH({ TensorImpl y(ScalarType::NumOptions, 1, sizes); }, ""); in TEST_F()
40 { TensorImpl y(static_cast<ScalarType>(127), 1, sizes); }, ""); in TEST_F()
41 ET_EXPECT_DEATH({ TensorImpl y(static_cast<ScalarType>(-1), 1, sizes); }, ""); in TEST_F()
45 TensorImpl::SizesType sizes[1] = {5}; in TEST_F()
46 TensorImpl::DimOrderType dim_order[1] = {0}; in TEST_F()
48 auto a_impl = TensorImpl(ScalarType::Int, 1, sizes, data, dim_order, nullptr); in TEST_F()
56 TensorImpl::SizesType sizes[2] = {2, 2}; in TEST_F()
[all …]
tensor_impl_test.cpp
24 using executorch::runtime::etensor::TensorImpl;
25 using SizesType = TensorImpl::SizesType;
26 using DimOrderType = TensorImpl::DimOrderType;
27 using StridesType = TensorImpl::StridesType;
44 TensorImpl t(ScalarType::Float, 2, sizes, data, dim_order, strides); in TEST_F()
68 TensorImpl t( in TEST_F()
98 TensorImpl t( in TEST_F()
127 TensorImpl t(ScalarType::Float, 2, sizes, data, dim_order, strides); in TEST_F()
155 TensorImpl t( in TEST_F()
202 TensorImpl t( in TEST_F()
[all …]
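
These tests drive the portable-type TensorImpl directly; a compact sketch of that construction and the accessors under test (constructor signature as used in the test lines above; include path assumed):

    #include <executorch/runtime/core/portable_type/tensor_impl.h>

    using executorch::runtime::etensor::ScalarType;
    using executorch::runtime::etensor::TensorImpl;

    void sketch() {
      TensorImpl::SizesType sizes[2] = {2, 2};
      TensorImpl::DimOrderType dim_order[2] = {0, 1};
      TensorImpl::StridesType strides[2] = {2, 1};
      float data[4] = {1.f, 2.f, 3.f, 4.f};

      // The impl keeps raw pointers, so all arrays must outlive it.
      TensorImpl t(ScalarType::Float, /*dim=*/2, sizes, data, dim_order, strides);
      // 2*2 float elements: t.dim() == 2, t.element_size() == 4, t.nbytes() == 16.
    }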
/aosp_15_r20/external/executorch/examples/qualcomm/oss_scripts/llama3_2/runner/
io_memory.cpp
16 using executorch::aten::TensorImpl;
40 for (TensorImpl* impl : input_tensors_[shard_index]) { in get_input_tensors()
49 for (TensorImpl* impl : output_tensors_[shard_index]) { in get_output_tensors()
76 input_tok_ = std::make_unique<TensorImpl>( in prepare_io()
79 const_cast<TensorImpl::SizesType*>(input_tok->sizes().data()), in prepare_io()
81 const_cast<TensorImpl::DimOrderType*>(input_tok->dim_order().data())); in prepare_io()
86 input_pos_ = std::make_unique<TensorImpl>( in prepare_io()
89 const_cast<TensorImpl::SizesType*>(input_pos->sizes().data()), in prepare_io()
91 const_cast<TensorImpl::DimOrderType*>(input_pos->dim_order().data())); in prepare_io()
98 attention_mask_ = std::make_unique<TensorImpl>( in prepare_io()
[all …]
io_memory.h
54 std::vector<std::vector<executorch::aten::TensorImpl*>> input_tensors_;
55 std::vector<std::vector<executorch::aten::TensorImpl*>> output_tensors_;
90 std::unique_ptr<executorch::aten::TensorImpl> input_tok_;
91 std::unique_ptr<executorch::aten::TensorImpl> input_pos_;
92 std::unique_ptr<executorch::aten::TensorImpl> hidden_state_;
93 std::unique_ptr<executorch::aten::TensorImpl> attention_mask_;
94 std::vector<std::unique_ptr<executorch::aten::TensorImpl>> k_cache_in_;
95 std::vector<std::unique_ptr<executorch::aten::TensorImpl>> v_cache_in_;
96 std::vector<std::unique_ptr<executorch::aten::TensorImpl>> k_cache_out_;
97 std::vector<std::unique_ptr<executorch::aten::TensorImpl>> v_cache_out_;
[all …]
/aosp_15_r20/external/pytorch/c10/core/impl/
PyInterpreter.h
20 struct TensorImpl;
136 virtual c10::intrusive_ptr<TensorImpl> detach(
137 const TensorImpl* self) const = 0;
169 virtual bool is_contiguous(const TensorImpl* self, at::MemoryFormat)
171 virtual bool is_strides_like(const TensorImpl* self, at::MemoryFormat)
173 virtual bool is_non_overlapping_and_dense(const TensorImpl* self) const = 0;
174 virtual c10::Device device(const TensorImpl* self) const = 0;
175 virtual int64_t dim(const TensorImpl* self) const = 0;
176 virtual c10::IntArrayRef strides(const TensorImpl* self) const = 0;
177 virtual c10::IntArrayRef sizes(const TensorImpl* self) const = 0;
[all …]
PyInterpreter.cpp
23 c10::intrusive_ptr<TensorImpl> detach(const TensorImpl* self) const override { in detach()
60 bool is_contiguous(const TensorImpl* self, at::MemoryFormat) const override { in is_contiguous()
63 bool is_strides_like(const TensorImpl* self, at::MemoryFormat) in is_strides_like()
67 bool is_non_overlapping_and_dense(const TensorImpl* self) const override { in is_non_overlapping_and_dense()
70 c10::Device device(const TensorImpl* self) const override { in device()
73 int64_t dim(const TensorImpl* self) const override { in dim()
76 c10::IntArrayRef strides(const TensorImpl* self) const override { in strides()
79 c10::IntArrayRef sizes(const TensorImpl* self) const override { in sizes()
82 c10::SymIntArrayRef sym_sizes(const TensorImpl* self) const override { in sym_sizes()
85 c10::Layout layout(const TensorImpl* self) const override { in layout()
[all …]
/aosp_15_r20/external/pytorch/torch/csrc/
PyInterpreter.cpp
52 c10::intrusive_ptr<c10::TensorImpl> detach(
53 const c10::TensorImpl* self) const override;
83 bool is_contiguous(const c10::TensorImpl* self, at::MemoryFormat)
85 bool is_strides_like(const c10::TensorImpl* self, at::MemoryFormat)
87 bool is_non_overlapping_and_dense(const c10::TensorImpl* self) const override;
88 c10::Device device(const c10::TensorImpl* self) const override;
89 int64_t dim(const c10::TensorImpl* self) const override;
90 c10::IntArrayRef strides(const c10::TensorImpl* self) const override;
91 c10::IntArrayRef sizes(const c10::TensorImpl* self) const override;
92 c10::SymIntArrayRef sym_sizes(const c10::TensorImpl* self) const override;
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/
MemoryOverlap.h
6 struct TensorImpl;
24 TORCH_API MemOverlap has_internal_overlap(c10::TensorImpl* t);
27 TORCH_API void assert_no_internal_overlap(c10::TensorImpl* t);
32 get_overlap_status(const c10::TensorImpl* a, const c10::TensorImpl* b);
37 void assert_no_partial_overlap(c10::TensorImpl* a, c10::TensorImpl* b);
40 TORCH_API void assert_no_overlap(c10::TensorImpl* a, c10::TensorImpl* b);
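
These checks guard in-place and out= kernels against aliased arguments; a minimal usage sketch with the TensorImpl* overloads listed above:

    #include <ATen/ATen.h>
    #include <ATen/MemoryOverlap.h>

    void check_io(const at::Tensor& out, const at::Tensor& self) {
      // Throws if out's elements alias each other internally
      // (e.g. a stride-0 view produced by expand()).
      at::assert_no_internal_overlap(out.unsafeGetTensorImpl());
      // Throws if out and self share memory, partially or fully.
      at::assert_no_overlap(out.unsafeGetTensorImpl(), self.unsafeGetTensorImpl());
    }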
NamedTensorUtils.h
158 TORCH_API TensorImpl* propagate_names_if_nonempty(
159 TensorImpl* result,
163 TORCH_API TensorImpl* propagate_names(
164 TensorImpl* result,
168 TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src);
202 TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);
210 TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other);
MemoryOverlap.cpp
12 MemOverlap has_internal_overlap(TensorImpl* t) { in has_internal_overlap()
40 void assert_no_internal_overlap(TensorImpl* t) { in assert_no_internal_overlap()
51 MemOverlapStatus get_overlap_status(const TensorImpl* a, const TensorImpl* b) { in get_overlap_status()
86 void assert_no_partial_overlap(TensorImpl* a, TensorImpl* b) { in assert_no_partial_overlap()
97 void assert_no_overlap(TensorImpl* a, TensorImpl* b) { in assert_no_overlap()
OpaqueTensorImpl.h
21 struct TORCH_API OpaqueTensorImpl : public TensorImpl {
30 : TensorImpl(key_set, data_type, device), in TensorImpl() function
43 TensorImpl::release_resources(); in release_resources()
73 c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( in shallow_copy_and_detach()
97 c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( in shallow_copy_and_detach()
121 void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override { in shallow_copy_from()
154 TensorImpl::copy_tensor_metadata( in copy_tensor_metadata()
169 TensorImpl::copy_tensor_metadata( in copy_tensor_metadata()
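
shallow_copy_and_detach is the hook Tensor::detach() ultimately relies on: it clones the metadata while sharing the underlying Storage. A sketch of invoking it on a plain impl, using the c10 signatures shown above (the version-counter and metadata-change arguments are simply forwarded from the source impl here):

    #include <ATen/ATen.h>

    at::Tensor manual_detach(const at::Tensor& t) {
      c10::TensorImpl* impl = t.unsafeGetTensorImpl();
      // The copy shares t's Storage but carries its own metadata.
      c10::intrusive_ptr<c10::TensorImpl> copy = impl->shallow_copy_and_detach(
          /*version_counter=*/impl->version_counter(),
          /*allow_tensor_metadata_change=*/impl->allow_tensor_metadata_change());
      return at::Tensor(std::move(copy));
    }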
NestedTensorImpl.h
20 struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
42 c10::TensorImpl::ImplType impl_type,
100 auto buffer_tensor_impl = c10::make_intrusive<TensorImpl>( in get_unsafe_storage_as_tensor()
101 c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_); in get_unsafe_storage_as_tensor()
133 c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
137 c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
141 void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override { in shallow_copy_from()
177 c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
custom_function.cpp
30 std::unordered_map<at::TensorImpl*, size_t> inputs_mapping, in _process_forward_mode_AD() argument
33 const std::unordered_set<at::TensorImpl*>& non_differentiable, in _process_forward_mode_AD()
34 const std::unordered_set<at::TensorImpl*>& dirty_inputs, in _process_forward_mode_AD()
47 std::vector<at::TensorImpl*> grad_impls; in _process_forward_mode_AD()
48 std::unordered_map<at::TensorImpl*, size_t> inputs_bases; in _process_forward_mode_AD()
257 const std::unordered_map<at::TensorImpl*, size_t>& inputs_mapping, in _process_backward_mode_ad() argument
258 const std::unordered_set<at::TensorImpl*>& non_differentiable, in _process_backward_mode_ad()
259 const std::unordered_set<at::TensorImpl*>& dirty_inputs, in _process_backward_mode_ad()
262 const std::unordered_set<at::TensorImpl*>& to_save_if_setup_context, in _process_backward_mode_ad()
349 std::unordered_set<at::TensorImpl*> outputs_impl; // For dirty_inputs check in _process_backward_mode_ad()
[all …]
/aosp_15_r20/external/executorch/runtime/core/exec_aten/testing_util/test/
tensor_factory_test.cpp
80 using torch::executor::TensorImpl;
133 TensorImpl impl = in TEST_F()
134 TensorImpl(ScalarType::Int, dim, sizes, data, dim_order, strides); in TEST_F()
162 TensorImpl impl = in TEST_F()
163 TensorImpl(ScalarType::Float, dim, sizes, data, dim_order, strides); in TEST_F()
192 TensorImpl impl = in TEST_F()
193 TensorImpl(ScalarType::Bool, dim, sizes, data, dim_order, strides); in TEST_F()
325 TensorImpl impl = in TEST_F()
326 TensorImpl(ScalarType::Int, dim, sizes, data, dim_order, strides); in TEST_F()
362 TensorImpl impl = in TEST_F()
[all …]
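
The factory under test exists precisely to hide this construction boilerplate; typical usage, assuming the ExecuTorch testing_util TensorFactory API that this file verifies:

    #include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>

    using torch::executor::ScalarType;
    using torch::executor::testing::TensorFactory;

    void example() {
      TensorFactory<ScalarType::Int> tf;
      // make() owns the data and derives dim/dim_order/strides, replacing the
      // manual TensorImpl(ScalarType::Int, dim, sizes, data, ...) calls above.
      auto t = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 3, 4});
      (void)t;
    }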
/aosp_15_r20/external/tensorflow/tensorflow/lite/java/src/main/java/org/tensorflow/lite/
NativeInterpreterWrapper.java
118 this.inputTensors = new TensorImpl[getInputCount(interpreterHandle)]; in init()
119 this.outputTensors = new TensorImpl[getOutputCount(interpreterHandle)]; in init()
191 TensorImpl tensor = getInputTensor(input.getKey(), signatureKey); in runSignature()
233 TensorImpl tensor = getInputTensor(i); in run()
252 for (TensorImpl outputTensor : outputTensors) { in run()
301 for (TensorImpl outputTensor : outputTensors) { in allocateTensorsIfNeeded()
371 TensorImpl getInputTensor(int index) { in getInputTensor()
375 TensorImpl inputTensor = inputTensors[index]; in getInputTensor()
379 TensorImpl.fromIndex( in getInputTensor()
390 TensorImpl getInputTensor(String inputName, String signatureKey) { in getInputTensor()
[all …]
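
NativeInterpreterWrapper is the JNI bridge behind org.tensorflow.lite.Interpreter, and its inputTensors/outputTensors caches mirror the native interpreter's tensor list. To keep a single example language in this document, here is a rough sketch of the equivalent run() flow through the TFLite C++ API it forwards to (standard calls; the model path and float I/O types are hypothetical):

    #include <memory>
    #include "tensorflow/lite/interpreter.h"
    #include "tensorflow/lite/kernels/register.h"
    #include "tensorflow/lite/model.h"

    int run_once() {
      // Hypothetical model file.
      auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
      tflite::ops::builtin::BuiltinOpResolver resolver;
      std::unique_ptr<tflite::Interpreter> interpreter;
      tflite::InterpreterBuilder(*model, resolver)(&interpreter);
      interpreter->AllocateTensors();  // cf. allocateTensorsIfNeeded() above
      *interpreter->typed_input_tensor<float>(0) = 1.0f;  // cf. getInputTensor()
      if (interpreter->Invoke() != kTfLiteOk) return 1;   // cf. run()
      return *interpreter->typed_output_tensor<float>(0) > 0.f ? 0 : 2;
    }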
/aosp_15_r20/external/executorch/examples/models/llama3_2_vision/cross_attention/
cross_attention_mask_test.cpp
16 using exec_aten::TensorImpl;
23 TensorImpl::SizesType sizes[2] = {2, 2}; in TEST()
24 TensorImpl::DimOrderType dim_order[2] = {0, 1}; in TEST()
25 TensorImpl::StridesType strides[2] = {2, 1}; in TEST()
29 TensorImpl(ScalarType::Int, 2, sizes, a_data, dim_order, strides); in TEST()
34 TensorImpl(ScalarType::Int, 2, sizes, b_data, dim_order, strides); in TEST()
39 TensorImpl(ScalarType::Int, 2, sizes, c_data, dim_order, strides); in TEST()
/aosp_15_r20/external/pytorch/aten/src/ATen/core/
NamedTensor.cpp
69 static NamedTensorMeta* get_named_tensor_meta(TensorImpl* impl) { in get_named_tensor_meta()
76 static const NamedTensorMeta* get_named_tensor_meta(const TensorImpl* impl) { in get_named_tensor_meta()
83 void check_names_valid_for(TensorImpl* impl, DimnameList names) { in check_names_valid_for()
87 void internal_set_names_inplace(TensorImpl* impl, std::optional<DimnameList> names, bool validate_n… in internal_set_names_inplace()
113 void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names… in internal_set_names_inplace()
130 std::optional<DimnameList> get_opt_names(const TensorImpl* impl) { in get_opt_names()
139 DimnameList get_names(const TensorImpl* impl) { in get_names()
147 bool has_names(const TensorImpl* impl) { in has_names()
NamedTensor.h
114 TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::optional<DimnameList> names, bool …
115 TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool vali…
117 void check_names_valid_for(TensorImpl* impl, DimnameList names);
123 TORCH_API bool has_names(const TensorImpl* impl);
128 TORCH_API DimnameList get_names(const TensorImpl* impl);
135 TORCH_API std::optional<DimnameList> get_opt_names(const TensorImpl* impl);
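
The query side of this API is small; a sketch using the signatures listed above:

    #include <ATen/core/NamedTensor.h>

    void describe(const at::TensorImpl* impl) {
      if (at::has_names(impl)) {
        // DimnameList is an ArrayRef of at::Dimname values such as "N" or "C".
        at::DimnameList names = at::get_names(impl);
        (void)names;
      }
    }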
/aosp_15_r20/external/executorch/runtime/core/portable_type/
tensor.h
32 using SizesType = TensorImpl::SizesType;
34 using DimOrderType = TensorImpl::DimOrderType;
36 using StridesType = TensorImpl::StridesType;
39 explicit constexpr Tensor(TensorImpl* impl) : impl_(impl) {} in Tensor()
47 TensorImpl* unsafeGetTensorImpl() const { in unsafeGetTensorImpl()
162 TensorImpl* impl_ = nullptr;
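
This Tensor is a thin non-owning view: impl_ is a raw pointer, so the TensorImpl must outlive every Tensor built over it. Wiring the two together with the constructor shown above (include path assumed):

    #include <executorch/runtime/core/portable_type/tensor.h>

    using executorch::runtime::etensor::Tensor;
    using executorch::runtime::etensor::TensorImpl;

    Tensor wrap(TensorImpl& impl) {
      Tensor t(&impl);                             // non-owning view
      TensorImpl* same = t.unsafeGetTensorImpl();  // round-trips to &impl
      (void)same;
      return t;  // copying is fine; the caller still owns impl's lifetime
    }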
tensor_impl.cpp
27 ssize_t compute_numel(const TensorImpl::SizesType* sizes, ssize_t dim) { in compute_numel()
43 TensorImpl::TensorImpl( in TensorImpl() function in executorch::runtime::etensor::TensorImpl
65 size_t TensorImpl::nbytes() const { in nbytes()
70 ssize_t TensorImpl::element_size() const { in element_size()
74 Error TensorImpl::internal_resize_contiguous(ArrayRef<SizesType> new_sizes) { in internal_resize_contiguous()
/aosp_15_r20/external/tensorflow/tensorflow/lite/java/src/test/java/org/tensorflow/lite/
TensorTest.java
56 private TensorImpl tensor;
421 assertThat(TensorImpl.computeNumDimensions(scalar)).isEqualTo(0); in testNumDimensions()
423 assertThat(TensorImpl.computeNumDimensions(array)).isEqualTo(2); in testNumDimensions()
427 IllegalArgumentException.class, () -> TensorImpl.computeNumDimensions(emptyArray)); in testNumDimensions()
434 assertThat(TensorImpl.computeNumElements(scalarShape)).isEqualTo(1); in testNumElements()
436 assertThat(TensorImpl.computeNumElements(vectorShape)).isEqualTo(3); in testNumElements()
438 assertThat(TensorImpl.computeNumElements(matrixShape)).isEqualTo(12); in testNumElements()
440 assertThat(TensorImpl.computeNumElements(degenerateShape)).isEqualTo(0); in testNumElements()
446 int num = TensorImpl.computeNumDimensions(array); in testFillShape()
448 TensorImpl.fillShape(array, 0, shape); in testFillShape()
[all …]
