
Searched defs:dim (Results 1 – 25 of 1861) sorted by relevance


/aosp_15_r20/external/pytorch/aten/src/ATen/native/
ReduceOps.cpp
193 TORCH_META_FUNC2(all, dim)(const Tensor& self, int64_t dim, bool keepdim) { in TORCH_META_FUNC2() argument
205 TORCH_META_FUNC2(any, dim)(const Tensor& self, int64_t dim, bool keepdim) { in TORCH_META_FUNC2() argument
220 const std::optional<int64_t>& dim) { in check_argmax_argmin()
247 int64_t dim, in meta_func_cum_ops()
291 TORCH_META_FUNC2(mean, dim) in TORCH_META_FUNC2() argument
356 auto dim = maybe_wrap_dim(dim_opt.value(), self.ndimension()); in TORCH_META_FUNC() local
445 Tensor _logcumsumexp_cpu(const Tensor& self, int64_t dim) { in _logcumsumexp_cpu()
450 Tensor& _logcumsumexp_out_cpu(const Tensor& self, int64_t dim, Tensor& result) { in _logcumsumexp_out_cpu()
455 Tensor logcumsumexp(const Tensor& self, int64_t dim) { in logcumsumexp()
464 Tensor& logcumsumexp_out(const Tensor& self, int64_t dim, Tensor& result) { in logcumsumexp_out()
[all …]
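For orientation, the ReduceOps.cpp entries above are the native kernels behind PyTorch's dim/keepdim reductions and cumulative ops; a minimal Python sketch of the convention they implement:

import torch

x = torch.tensor([[1.0, 2.0, 3.0],
                  [4.0, 5.0, 6.0]])            # shape (2, 3)

# Reducing over dim=1 collapses that axis; keepdim=True keeps it as a size-1 axis.
torch.mean(x, dim=1)                 # tensor([2., 5.]), shape (2,)
torch.mean(x, dim=1, keepdim=True)   # shape (2, 1)
torch.any(x > 4, dim=0)              # tensor([False,  True,  True])
torch.argmax(x, dim=None)            # dim=None flattens first; here index 5

# logcumsumexp is cumulative rather than reducing: output keeps the input shape.
torch.logcumsumexp(x, dim=1)         # log(cumsum(exp(x), dim=1))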
SpectralOps.cpp
162 IntArrayRef dim, int64_t norm, bool onesided) { in fft_r2c_maybe_out()
174 IntArrayRef dim, int64_t norm, SymInt last_dim_size) { in fft_c2r_maybe_out()
188 IntArrayRef dim, int64_t norm, bool forward) { in fft_c2c_maybe_out()
207 const auto dim = maybe_wrap_dim(unwrapped_dim, input_dim, /*wrap_scalar=*/false); in fft_c2r() local
233 const auto dim = maybe_wrap_dim(unwrapped_dim, input_dim, /*wrap_scalar=*/false); in fft_r2c() local
265 const auto dim = maybe_wrap_dim(unwrapped_dim, input_dim, /*wrap_scalar=*/false); in fft_c2c() local
278 DimVector dim; member
285 Tensor input, at::OptionalSymIntArrayRef shape, at::OptionalIntArrayRef dim) { in canonicalize_fft_shape_and_dim_args()
349 IntArrayRef dim, std::optional<c10::string_view> norm_str, bool forward) { in fftn_c2c()
360 Tensor fft_fft_symint(const Tensor& self, std::optional<SymInt> n, int64_t dim, in fft_fft_symint()
[all …]
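The SpectralOps.cpp helpers (r2c, c2r, c2c) sit behind the torch.fft transforms, which all take a dim or a tuple of dims; a small illustrative sketch:

import torch

x = torch.randn(4, 8)

X  = torch.fft.fft(x, dim=-1)           # complex-to-complex along the last dim
Xr = torch.fft.rfft(x, dim=-1)          # real-to-complex ("onesided"): last dim becomes 8 // 2 + 1 = 5
xr = torch.fft.irfft(Xr, n=8, dim=-1)   # complex-to-real; n restores the original length
Xn = torch.fft.fftn(x, dim=(0, 1), norm="ortho")   # n-d variant takes a tuple of dims and a norm string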
NamedTensor.cpp
87 ptrdiff_t dim = (ptrdiff_t)tensor_sizes.size() - 1; in aligned_size() local
290 const auto& dim = tensor_names[idx]; in align_to() local
335 Tensor gather(const Tensor& self, Dimname dim, const Tensor& index, bool sparse_grad) { in gather()
338 Tensor& gather_out(const Tensor& self, Dimname dim, const Tensor& index, bool sparse_grad, Tensor& … in gather_out()
341 Tensor index_add(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const … in index_add()
344 Tensor index_fill(const Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) { in index_fill()
347 Tensor& index_fill_(Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) { in index_fill_()
350 Tensor index_fill(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) { in index_fill()
353 Tensor& index_fill_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) { in index_fill_()
356 Tensor index_copy(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) { in index_copy()
[all …]
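The Dimname overloads above mirror the integer-dim indexing ops; a short sketch of the int-dim forms (the named forms accept a dimension name in place of an index):

import torch

x = torch.tensor([[1, 2], [3, 4]])
idx = torch.tensor([[0, 0], [1, 0]])

torch.gather(x, dim=1, index=idx)                           # out[i][j] = x[i][idx[i][j]] -> [[1, 1], [4, 3]]
x.index_fill(0, torch.tensor([1]), -1)                      # rows listed in index are filled with the scalar
x.index_copy(0, torch.tensor([0]), torch.tensor([[9, 9]]))  # row 0 replaced by the source row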
Sorting.cpp
56 int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true); in TORCH_META_FUNC() local
95 void _fill_indices(const TensorBase &indices, int64_t dim) { in _fill_indices()
421 int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true); in kthvalue_out_impl_cpu() local
505 int64_t dim, in median_with_indices_impl()
656 std::optional<int64_t> dim, in quantile_out()
674 std::optional<int64_t> dim, in quantile_out()
692 std::optional<int64_t> dim, in quantile()
707 std::optional<int64_t> dim, in quantile()
719 std::optional<int64_t> dim, in nanquantile_out()
737 std::optional<int64_t> dim, in nanquantile_out()
[all …]
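Sorting.cpp backs kthvalue, median, and quantile, all of which reduce along a single dim; for example:

import torch

x = torch.tensor([[3.0, 1.0, 2.0],
                  [6.0, 5.0, 4.0]])

values, indices = torch.kthvalue(x, k=2, dim=1)   # 2nd smallest per row: values tensor([2., 5.])
med, med_idx    = torch.median(x, dim=1)          # per-row median value and its index
q               = torch.quantile(x, 0.5, dim=1)   # with dim=None the input is flattened first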
TensorShape.cpp
397 int64_t dim = size.size(); in set_storage_meta__symint() local
662 Tensor& cat_out(TensorList tensors, Dimname dim, Tensor& result) { in cat_out()
667 Tensor cat(TensorList tensors, Dimname dim) { in cat()
673 Tensor& concat_out(TensorList tensors, Dimname dim, Tensor& result) { in concat_out()
677 Tensor concat(TensorList tensors, Dimname dim) { in concat()
681 Tensor & concat_out(TensorList tensors, int64_t dim, Tensor & result) { in concat_out()
685 Tensor concat(TensorList tensors, int64_t dim) { in concat()
690 Tensor& concatenate_out(TensorList tensors, Dimname dim, Tensor& result) { in concatenate_out()
694 Tensor concatenate(TensorList tensors, Dimname dim) { in concatenate()
698 Tensor& concatenate_out(TensorList tensors, int64_t dim, Tensor & result) { in concatenate_out()
[all …]
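The cat/concat/concatenate overloads above differ only in name and in whether they write into an out tensor; a quick sketch:

import torch

a = torch.zeros(2, 3)
b = torch.ones(4, 3)

c = torch.cat([a, b], dim=0)        # sizes must match on every dim except `dim`; result shape (6, 3)
d = torch.concat([a, b], dim=0)     # concat / concatenate are aliases of cat
out = torch.empty(6, 3)
torch.cat([a, b], dim=0, out=out)   # the *_out overloads write into a preallocated tensor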
Integration.cpp
32 Tensor do_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) { in do_trapezoid()
42 Tensor do_trapezoid(const Tensor& y, double dx, int64_t dim) { in do_trapezoid()
46 Tensor zeros_like_except(const Tensor& y, int64_t dim) { in zeros_like_except()
53 Tensor do_cumulative_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) { in do_cumulative_trapezoid()
60 Tensor do_cumulative_trapezoid(const Tensor& y, double dx, int64_t dim) { in do_cumulative_trapezoid()
85 Tensor trapezoid(const Tensor& y, const Tensor& x, int64_t dim) { in trapezoid()
122 Tensor trapezoid(const Tensor& y, const Scalar& dx, int64_t dim) { in trapezoid()
132 Tensor trapz(const Tensor& y, const Tensor& x, int64_t dim) { in trapz()
136 Tensor trapz(const Tensor& y, double dx, int64_t dim) { in trapz()
140 Tensor cumulative_trapezoid(const Tensor& y, const Tensor& x, int64_t dim) { in cumulative_trapezoid()
[all …]
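Integration.cpp implements the trapezoidal-rule ops; a minimal example of the dim and dx/x variants:

import torch

y = torch.tensor([1.0, 4.0, 9.0])
x = torch.tensor([0.0, 1.0, 2.0])

torch.trapezoid(y, x, dim=-1)              # trapezoidal-rule integral of y over x -> 9.0
torch.trapezoid(y, dx=0.5, dim=-1)         # uniform spacing given as a scalar dx
torch.cumulative_trapezoid(y, x, dim=-1)   # running integral; one element shorter along `dim`
torch.trapz(y, x, dim=-1)                  # older alias of trapezoid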
ReduceOpsUtils.h
35 const Tensor& src, int64_t dim, in restride_dim()
44 int64_t dim) { in _dimreduce_setup()
53 const Scalar& ident, int64_t dim, bool keepdim) { in _dimreduce_return_trivial()
141 for (int dim = shape.size() - 1; dim >= 0; dim--) { in shape_from_dim_mask() local
163 const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype in create_reduction_result()
176 for (const auto dim : c10::irange(ndim)) { in review_reduce_result() local
198 IntArrayRef dim = dim_opt.value_or(IntArrayRef{}); in make_reduction() local
212 at::OptionalIntArrayRef dim, bool keepdim, ScalarType out_dtype) { in make_reduction()
239 auto dim = dim_opt.value_or(IntArrayRef{}); in make_reduction() local
264 at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype) { in make_reduction()
[all …]
TensorCompare.cpp
249 TORCH_PRECOMPUTE_META_FUNC2(max, dim) in TORCH_PRECOMPUTE_META_FUNC2() argument
259 TORCH_PRECOMPUTE_META_FUNC2(min, dim)(const Tensor& self, int64_t dim, bool keepdim) { in TORCH_PRECOMPUTE_META_FUNC2() argument
580 std::tuple<Tensor, Tensor> mode(const Tensor& self, int64_t dim, bool keepdim) { in mode()
586 std::tuple<Tensor &,Tensor &> mode_out(const Tensor& self, int64_t dim, bool keepdim, in mode_out()
630 int64_t dim, in minmax_out_impl()
664 std::tuple<Tensor, Tensor> qmax(const Tensor& self, int64_t dim, bool keepdim) { in qmax()
675 std::tuple<Tensor, Tensor> qmin(const Tensor& self, int64_t dim, bool keepdim) { in qmin()
686 std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdim) { in _aminmax()
783 std::tuple<Tensor, Tensor> min(const Tensor& self, Dimname dim, bool keepdim) { in min()
786 std::tuple<Tensor &,Tensor &> min_out(const Tensor& self, Dimname dim, bool keepdim, Tensor& min, T… in min_out()
[all …]
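TensorCompare.cpp provides the dim-wise min/max/mode reductions that return (values, indices) pairs; for instance:

import torch

x = torch.tensor([[1.0, 5.0, 3.0],
                  [4.0, 2.0, 6.0]])

values, indices = torch.max(x, dim=1)             # per-row maximum and its index
values, indices = torch.min(x, dim=1, keepdim=True)
modes, mode_idx = torch.mode(x, dim=0)            # most frequent value along dim=0
lo, hi          = torch.aminmax(x, dim=1)         # min and max in one pass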
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/
window_util.cc
65 /* static */ std::string ToString(const WindowDimension& dim) { in ToString()
109 [](const WindowDimension& dim) { return StrCat(dim.size()); }); in ToString()
113 [](const WindowDimension& dim) { return StrCat(dim.stride()); }); in ToString()
116 add_field(" pad", [](const WindowDimension& dim) { in ToString()
121 add_field(" lhs_dilate", [](const WindowDimension& dim) { in ToString()
126 add_field(" rhs_dilate", [](const WindowDimension& dim) { in ToString()
131 add_field(" rhs_reversal", [](const WindowDimension& dim) { in ToString()
139 for (const auto& dim : window.dimensions()) { in HasStride() local
148 for (const auto& dim : window.dimensions()) { in HasPadding() local
157 return absl::c_all_of(window.dimensions(), [](const WindowDimension& dim) { in HasSymmetricPadding()
[all …]
/aosp_15_r20/external/executorch/kernels/test/
op_squeeze_copy_test.cpp
27 op_squeeze_copy_dim_out(const Tensor& self, int64_t dim, Tensor& out) { in op_squeeze_copy_dim_out()
49 int64_t dim = 0; in TEST_F() local
59 int64_t dim = 0; variable
70 int64_t dim = 1; variable
80 int64_t dim = 0; variable
92 int64_t dim = 0; variable
104 int64_t dim = 1; variable
120 int64_t dim = 0; variable
135 int64_t dim = 0; variable
148 int64_t dim = 0; variable
[all …]
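The squeeze_copy.dim_out kernel under test follows torch.squeeze's rule that only size-1 dims are removed; a hedged Python sketch (squeeze_copy is the out-of-place *_copy variant):

import torch

x = torch.zeros(2, 1, 3)

torch.squeeze(x, dim=1).shape        # torch.Size([2, 3])    -- the size-1 dim is removed
torch.squeeze(x, dim=0).shape        # torch.Size([2, 1, 3]) -- a non-size-1 dim is left alone, not an error
torch.squeeze_copy(x, dim=1).shape   # same shape rule, but always returns a freshly allocated tensor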
/aosp_15_r20/external/pytorch/torch/onnx/
symbolic_opset11.py
197 def select(g: jit_utils.GraphContext, self, dim, index): argument
385 def gather(g: jit_utils.GraphContext, self, dim, index, sparse_grad=False): argument
393 def scatter(g: jit_utils.GraphContext, self, dim, index, src): argument
414 def cumsum(g: jit_utils.GraphContext, self, dim, dtype=None): argument
506 def pop(g: jit_utils.GraphContext, tensor_list, dim): argument
511 def Delete(g: jit_utils.GraphContext, tensor_list, dim): argument
517 def cat(g: jit_utils.GraphContext, tensor_list, dim): argument
526 def stack(g: jit_utils.GraphContext, tensor_list, dim): argument
545 def unique_dim( argument
556 def topk(g: jit_utils.GraphContext, self, k, dim, largest, sorted, out=None): argument
[all …]
symbolic_opset13.py
27 def softmax(g: jit_utils.GraphContext, input, dim, dtype=None): argument
40 def log_softmax(g: jit_utils.GraphContext, input, dim, dtype=None): argument
52 def frobenius_norm(g: jit_utils.GraphContext, self, dim=None, keepdim=False): argument
63 def split(g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None): argument
119 def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): argument
125 g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None argument
132 g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None argument
140 g: jit_utils.GraphContext, self, indices_or_sections, dim, _outputs=None argument
277 def unbind(g: jit_utils.GraphContext, self, dim=0, _outputs=None): argument
391 def symbolic(g, self, dim=None, keepdim=None): argument
[all …]
/aosp_15_r20/external/executorch/kernels/portable/cpu/util/
copy_ops_util.cpp
76 int64_t dim, in check_cat_args()
127 int64_t dim, in get_cat_out_target_size()
238 size_t dim = dims[i] >= 0 ? dims[i] : in.dim() + dims[i]; in check_permute_copy_args() local
253 bool check_unbind_copy_args(const Tensor& in, int64_t dim, TensorList out) { in check_unbind_copy_args()
390 int64_t dim, in check_select_copy_out_args()
402 int64_t dim, in get_select_copy_out_target_size()
419 int64_t dim, in check_split_with_sizes_copy_args()
446 int64_t dim, in get_split_with_sizes_copy_out_target_size()
459 int64_t dim, in check_squeeze_copy_dim_args()
469 int64_t dim, in get_squeeze_copy_dim_out_target_size()
[all …]
index_util.cpp
17 int64_t dim, in check_gather_args()
62 int64_t dim, in check_index_select_args()
112 int64_t dim, in get_index_select_out_target_size()
144 int64_t dim, in check_scatter_add_args()
196 int64_t dim, in check_scatter_src_args()
205 int64_t dim, in check_scatter_value_args()
215 int64_t dim, in check_select_scatter_args()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkl/
SpectralOps.cpp
146 for (const auto& dim : mirror_dims) { in _fft_fill_with_conjugate_symmetry_cpu_() local
170 Tensor& _fft_r2c_mkl_out(const Tensor& self, IntArrayRef dim, int64_t normalization, in _fft_r2c_mkl_out()
188 Tensor& _fft_c2r_mkl_out(const Tensor& self, IntArrayRef dim, int64_t normalization, in _fft_c2r_mkl_out()
195 Tensor& _fft_c2c_mkl_out(const Tensor& self, IntArrayRef dim, int64_t normalization, in _fft_c2c_mkl_out()
247 T compute_fct(const Tensor& t, IntArrayRef dim, int64_t normalization) { in compute_fct()
261 Tensor _fft_c2r_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization, int64_t last_dim_si… in _fft_c2r_mkl()
280 Tensor _fft_r2c_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization, bool onesided) { in _fft_r2c_mkl()
308 Tensor _fft_c2c_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization, bool forward) { in _fft_c2c_mkl()
431 IntArrayRef dim, int64_t normalization, bool forward) { in _exec_fft()
508 static DimVector _sort_dims(const Tensor& self, IntArrayRef dim, bool exclude_last=false) { in _sort_dims()
[all …]
/aosp_15_r20/external/mesa3d/src/amd/common/
ac_nir_lower_image_opcodes_cdna.c
31 static unsigned get_coord_components(enum glsl_sampler_dim dim, bool is_array) in get_coord_components()
50 enum glsl_sampler_dim dim, bool is_array, in lower_image_coords()
112 enum gl_access_qualifier access, enum glsl_sampler_dim dim, in emulated_image_load()
128 enum glsl_sampler_dim dim, bool is_array) in emulated_image_store()
140 static nir_def *get_dim(nir_builder *b, nir_def *desc, unsigned dim) in get_dim()
172 for (unsigned dim = 0; dim < num_dim_coords; dim++) in emulated_tex_level_zero() local
198 for (unsigned dim = 0; dim < num_dim_coords; dim++) { in emulated_tex_level_zero() local
220 for (unsigned dim = 0; dim < num_dim_coords; dim++) { in emulated_tex_level_zero() local
248 for (unsigned dim = 0; dim < num_dim_coords; dim++) in emulated_tex_level_zero() local
257 for (unsigned dim = 0; dim < num_dim_coords; dim++) { in emulated_tex_level_zero() local
[all …]
/aosp_15_r20/device/google/contexthub/firmware/os/algos/common/math/
vec.c
55 void vecAdd(float *u, const float *v, const float *w, size_t dim) { in vecAdd()
65 void vecAddInPlace(float *v, const float *w, size_t dim) { in vecAddInPlace()
74 void vecSub(float *u, const float *v, const float *w, size_t dim) { in vecSub()
84 void vecScalarMul(float *u, const float *v, float c, size_t dim) { in vecScalarMul()
93 void vecScalarMulInPlace(float *v, float c, size_t dim) { in vecScalarMulInPlace()
101 float vecNorm(const float *v, size_t dim) { in vecNorm()
107 float vecNormSquared(const float *v, size_t dim) { in vecNormSquared()
112 float vecDot(const float *v, const float *w, size_t dim) { in vecDot()
123 float vecMaxAbsoluteValue(const float *v, size_t dim) { in vecMaxAbsoluteValue()
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
BatchRulesScatterOps.cpp
353 for (const auto dim : c10::irange(indices_list.size())) { in compute_indexed_shape() local
677 int64_t dim, in scatter_batch_rule()
709 int64_t dim, in scatter_batch_rule()
747 int64_t dim, in scatter_value_batch_rule()
756 int64_t dim, in scatter_src_batch_rule()
765 int64_t dim, in scatter_add_batch_rule()
774 int64_t dim, in scatter_reduce_batch_rule()
784 int64_t dim, in scatter_value_reduce_batch_rule()
794 int64_t dim, in gather_batch_rule()
822 Tensor get_expanded_index(const Tensor& index, IntArrayRef self_size, int64_t dim) { in get_expanded_index()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
sharding_util_ops.cc
80 for (int dim = 0; dim < expected_rank; ++dim) { in GetAndValidateAttributes() local
108 auto divisor = [&](const int dim) { in GetSliceIndices()
116 for (int dim = num_partitions.size() - 1; dim > 0; --dim) { in GetSliceIndices() local
153 for (int dim = 0; dim < rank; ++dim) { in CompileInternal() local
166 for (int dim = 0; dim < rank; ++dim) { in CompileInternal() local
185 for (int dim = 0; dim < rank; ++dim) { in CompileInternal() local
199 for (int dim = 0; dim < rank; ++dim) { in CompileInternal() local
208 for (int dim = 0; dim < rank; ++dim) { in CompileInternal() local
349 for (int dim = 0; dim < rank; ++dim) { in CompileInternal() local
373 for (int dim = 0; dim < rank; ++dim) { in CompileInternal() local
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/
WrapDimUtils.h
17 inline int64_t maybe_wrap_dim(int64_t dim, TensorImpl* tensor) { in maybe_wrap_dim()
21 inline int64_t maybe_wrap_dim(int64_t dim, TensorList tensors) { in maybe_wrap_dim()
31 int64_t dim, in maybe_wrap_dim()
68 auto& dim = dims[i]; variable
108 int64_t dim, in legacy_cat_wrap_dim()
120 int64_t dim, in legacy_cat_wrap_dim_symint()
134 int64_t dim, in legacy_cat_wrap_dim()
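maybe_wrap_dim converts a possibly negative dim into the canonical [0, ndim) range; a hypothetical Python sketch of that logic (names and error text are illustrative, not the exact ATen messages):

def maybe_wrap_dim(dim: int, ndim: int, wrap_scalar: bool = True) -> int:
    # Hypothetical sketch: 0-d tensors are treated as 1-d when wrap_scalar is true.
    if ndim <= 0:
        if not wrap_scalar:
            raise IndexError("dimension specified as %d but tensor has no dimensions" % dim)
        ndim = 1
    if dim < -ndim or dim >= ndim:
        raise IndexError("dimension out of range (expected [%d, %d], got %d)" % (-ndim, ndim - 1, dim))
    return dim + ndim if dim < 0 else dim

maybe_wrap_dim(-1, 4)   # 3
maybe_wrap_dim(2, 4)    # 2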
/aosp_15_r20/external/tensorflow/tensorflow/core/grappler/utils/
symbolic_shapes.cc
36 bool IsKnown(const TensorShapeProto::Dim& dim) { return dim.size() >= 0; } in IsKnown()
38 bool IsKnownSymbolically(const TensorShapeProto::Dim& dim) { in IsKnownSymbolically()
42 bool IsUnknown(const TensorShapeProto::Dim& dim) { return dim.size() == -1; } in IsUnknown()
48 [](const TensorShapeProto::Dim& dim) { return !IsUnknown(dim); }); in ShapeIsSymbolicallyDefined()
67 for (const auto& dim : shape.dim()) { in NumCoefficients() local
125 for (const auto& dim : bcast.output_shape()) { in ShapeAfterBroadcast() local
157 const auto& dim = shape.dim(i); in CompareSymbolicallyShapedTensorSizes() local
226 for (const auto& dim : numerator.dim()) { in ComputeSizeRatio() local
236 for (const auto& dim : denominator.dim()) { in ComputeSizeRatio() local
/aosp_15_r20/external/pytorch/torch/csrc/api/src/nn/options/
activation.cpp
8 GLUOptions::GLUOptions(int64_t dim) : dim_(dim) {} in GLUOptions()
12 SoftmaxOptions::SoftmaxOptions(int64_t dim) : dim_(dim) {} in SoftmaxOptions()
14 SoftminOptions::SoftminOptions(int64_t dim) : dim_(dim) {} in SoftminOptions()
16 LogSoftmaxOptions::LogSoftmaxOptions(int64_t dim) : dim_(dim) {} in LogSoftmaxOptions()
34 SoftmaxFuncOptions::SoftmaxFuncOptions(int64_t dim) : dim_(dim) {} in SoftmaxFuncOptions()
36 SoftminFuncOptions::SoftminFuncOptions(int64_t dim) : dim_(dim) {} in SoftminFuncOptions()
38 LogSoftmaxFuncOptions::LogSoftmaxFuncOptions(int64_t dim) : dim_(dim) {} in LogSoftmaxFuncOptions()
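These option structs carry the dim for the C++ frontend activation modules; their Python analogues look like:

import torch
import torch.nn as nn

x = torch.randn(2, 6)

softmax     = nn.Softmax(dim=1)       # counterpart of SoftmaxOptions(dim)
log_softmax = nn.LogSoftmax(dim=1)    # counterpart of LogSoftmaxOptions(dim)
glu         = nn.GLU(dim=1)           # counterpart of GLUOptions(dim); halves the input along `dim`
softmax(x).sum(dim=1)                 # each row of the softmax output sums to 1
glu(x).shape                          # torch.Size([2, 3])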
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
TensorShape.cu
189 int64_t dim) { in get_split_base_addrs()
216 int64_t dim) { in get_split_chunk_sizes()
228 static inline int64_t get_chunk_stride(const at::Tensor& tensor, int64_t dim) { in get_chunk_stride()
237 static inline int64_t get_num_chunks(const at::Tensor& tensor, int64_t dim) { in get_num_chunks()
278 int64_t dim, in get_chunk_cat_out_sizes()
433 static inline int64_t get_leading_dim(at::IntArrayRef sizes, int64_t dim) { in get_leading_dim()
445 int64_t dim, in get_pad_size()
460 int64_t dim, in get_chunk_size()
490 int64_t dim, in get_chunk_cat_metadata()
558 int64_t dim, in _chunk_cat_out_cuda_contiguous()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
ragged_tensor_to_sparse_kernel.cc
89 for (int dim = rt_nested_splits_len - 2; dim >= 0; --dim) { in Compute() local
96 for (int dim = 0; dim < index_prefix.size(); ++dim) { in Compute() local
108 int dim = 0; in Compute() local
141 for (int dim = 0; dim < rt_nested_splits_len; ++dim) { in Compute() local
149 for (int dim = 1; dim < rt_dense_values_in.dims(); ++dim) { in Compute() local
204 for (int dim = 1; dim < values_shape.dims(); ++dim) { in MakeIndexSuffixes() local
221 const std::vector<int64_t>& pos, int dim, in IsCompleted()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/
TensorShape.cpp
35 bool is_cat_nhwc_fast_path(const MaterializedITensorListRef& qxs, int64_t dim) { in is_cat_nhwc_fast_path()
80 int64_t dim, in quantized_cat_impl()
120 int64_t dim, in quantized_cat_impl()
129 int64_t dim, in qcat()
141 Tensor qcat_out(const c10::List<Tensor>& qxs, int64_t dim, Tensor out) { in qcat_out()
161 Tensor cat_quantized_cpu(const ITensorListRef& qxs, int64_t dim) { in cat_quantized_cpu()
177 Tensor& cat_out_quantized_cpu(const ITensorListRef& qxs, int64_t dim, Tensor& out) { in cat_out_quantized_cpu()
