/aosp_15_r20/packages/modules/NeuralNetworks/common/cpu_operations/
L2Normalization.cpp
  in l2normFloat32Impl():
    52   const uint32_t innerSize =   (local)
    55   const float* inputBeg = inputData + outer * axisSize * innerSize;
    56   const float* inputEnd = inputBeg + axisSize * innerSize;
    57   float* outputBeg = outputData + outer * axisSize * innerSize;
    58   for (uint32_t inner = 0; inner < innerSize; ++inner, ++inputBeg, ++inputEnd, ++outputBeg) {
    60   for (const float* p = inputBeg; p < inputEnd; p += innerSize) {
    66   for (const float* p = inputBeg; p < inputEnd; p += innerSize, pOut += innerSize) {
  in l2normQuant8Impl():
    79   const uint32_t innerSize =   (local)
    82   const uint8_t* inputBeg = inputData + outer * axisSize * innerSize;
    83   const uint8_t* inputEnd = inputBeg + axisSize * innerSize;
  [all …]
Softmax.cpp
  in softmaxSlowFloat32():
    55   const uint32_t innerSize =   (local)
    58   const float* inputBeg = inputData + outer * axisSize * innerSize;
    59   const float* inputEnd = inputBeg + axisSize * innerSize;
    60   float* outputBeg = outputData + outer * axisSize * innerSize;
    61   for (uint32_t inner = 0; inner < innerSize; ++inner, ++inputBeg, ++inputEnd, ++outputBeg) {
    64   for (const float* p = inputBeg; p < inputEnd; p += innerSize) {
    69   for (const float* p = inputBeg; p < inputEnd; p += innerSize) {
    74   for (const float* p = inputBeg; p < inputEnd; p += innerSize, pOut += innerSize) {
  in softmaxQuant8Impl():
    130  const uint32_t innerSize =   (local)
    133  const T* inputBeg = inputData + outer * axisSize * innerSize;
  [all …]
LogSoftmax.cpp
  in compute():
    37   const uint32_t innerSize = getNumberOfElements(shape, axis + 1, getNumberOfDimensions(shape));   (local)
    39   for (uint32_t inner = 0; inner < innerSize; ++inner) {
    43   T maxValue = input[outer * axisSize * innerSize + inner];
    45   maxValue = std::max(maxValue, input[(outer * axisSize + i) * innerSize + inner]);
    51   (input[(outer * axisSize + i) * innerSize + inner] - maxValue) * beta));
    56   output[(outer * axisSize + i) * innerSize + inner] =
    57   (input[(outer * axisSize + i) * innerSize + inner] - maxValue) * beta -
LocalResponseNormalization.cpp
  in localResponseNormFloat32Impl():
    52   const uint32_t innerSize =   (local)
    55   const float* inputBase = inputData + outer * axisSize * innerSize;
    56   float* outputBase = outputData + outer * axisSize * innerSize;
    57   for (uint32_t inner = 0; inner < innerSize; ++inner, ++inputBase, ++outputBase) {
    64   float val = inputBase[d * innerSize];
    68   outputBase[i * innerSize] = inputBase[i * innerSize] * multiplier;
ChannelShuffle.cpp
  in eval():
    34   const uint32_t innerSize =   (local)
    38   for (uint32_t inner = 0; inner < innerSize; ++inner) {
    39   const T* inputBase = inputData + outer * axisSize * innerSize + inner;
    40   T* outputBase = outputData + outer * axisSize * innerSize + inner;
    43   j++, outputBase += innerSize) {
    44   *outputBase = inputBase[innerSize * (i + j * groupSize)];
ArgMinMax.cpp
  in argMinMaxImpl():
    35   const int innerSize =   (local)
    38   for (int inner = 0; inner < innerSize; ++inner) {
    39   auto minMaxValue = inputData[outer * axisSize * innerSize + inner];
    42   const auto& value = inputData[(outer * axisSize + i) * innerSize + inner];
    48   outputData[outer * innerSize + inner] = minMaxIndex;
Gather.cpp
  in eval():
    35   const auto innerSize =   (local)
    43   std::memcpy(outputData + (outer * indicesCount + outputIndex) * innerSize,
    44   inputData + (outer * axisSize + inputIndex) * innerSize,
    45   sizeof(T) * innerSize);
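All of the NNAPI reference kernels above share one traversal idiom: the tensor is viewed as [outerSize, axisSize, innerSize], where innerSize is the number of elements in the dimensions after the operated-on axis, so element (outer, i, inner) sits at flat offset (outer * axisSize + i) * innerSize + inner. A minimal self-contained sketch of that decomposition, using softmax as the example (the helper names numElements and softmaxAlongAxis are illustrative, not the NNAPI API):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    // Product of dims[begin..end), i.e. what getNumberOfElements() computes above.
    static uint32_t numElements(const std::vector<uint32_t>& dims, size_t begin, size_t end) {
        return std::accumulate(dims.begin() + begin, dims.begin() + end, 1u,
                               std::multiplies<uint32_t>());
    }

    // Softmax over an arbitrary axis via the outer/axis/inner decomposition.
    // Illustrative sketch, not the NNAPI implementation.
    void softmaxAlongAxis(const float* in, float* out,
                          const std::vector<uint32_t>& dims, size_t axis) {
        const uint32_t outerSize = numElements(dims, 0, axis);
        const uint32_t axisSize  = dims[axis];
        const uint32_t innerSize = numElements(dims, axis + 1, dims.size());
        for (uint32_t outer = 0; outer < outerSize; ++outer) {
            for (uint32_t inner = 0; inner < innerSize; ++inner) {
                // Successive elements along the axis are innerSize apart in memory.
                const float* beg = in + outer * axisSize * innerSize + inner;
                float* o = out + outer * axisSize * innerSize + inner;
                float maxVal = beg[0];
                for (uint32_t i = 1; i < axisSize; ++i)
                    maxVal = std::max(maxVal, beg[i * innerSize]);
                float sum = 0.f;
                for (uint32_t i = 0; i < axisSize; ++i)
                    sum += std::exp(beg[i * innerSize] - maxVal);
                for (uint32_t i = 0; i < axisSize; ++i)
                    o[i * innerSize] = std::exp(beg[i * innerSize] - maxVal) / sum;
            }
        }
    }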
/aosp_15_r20/external/eigen/test/
mapstride.cpp
  in map_class_matrix():
    77   if(k==1 && (m.innerSize()+1)*m.outerSize() > maxsize2)
    81   …e, Alignment, OuterStride<Dynamic> > map(array, rows, cols, OuterStride<Dynamic>(m.innerSize()+1));
    83   VERIFY(map.outerStride() == map.innerSize()+1);
    85   for(int j = 0; j < m.innerSize(); ++j)
    99   if(k==1 && (m.innerSize()+4)*m.outerSize() > maxsize2)
    108  map(array, rows, cols, OuterStride<OuterStrideAtCompileTime>(m.innerSize()+4));
    110  VERIFY(map.outerStride() == map.innerSize()+4);
    112  for(int j = 0; j < m.innerSize(); ++j)
    125  if(k==1 && (2*m.innerSize()+1)*(m.outerSize()*2) > maxsize2)
    129  …t, Stride<Dynamic,Dynamic> > map(array, rows, cols, Stride<Dynamic,Dynamic>(2*m.innerSize()+1, 2));
  [all …]
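Eigen's Map with an OuterStride, which this test exercises, views a raw buffer as a matrix whose consecutive inner vectors are separated by more than innerSize() elements (here innerSize()+1, i.e. one padding element per column in the column-major case). A minimal sketch of the same construction, with made-up sizes:

    #include <Eigen/Dense>
    #include <cassert>

    int main() {
        // A 3x4 column-major matrix stored with one padding element between
        // columns, i.e. an outer stride of innerSize()+1 = 4.
        float array[4 * 4] = {};  // 4 floats per padded column * 4 columns
        Eigen::Map<Eigen::MatrixXf, Eigen::Unaligned, Eigen::OuterStride<Eigen::Dynamic>>
            map(array, 3, 4, Eigen::OuterStride<Eigen::Dynamic>(4));
        assert(map.outerStride() == map.innerSize() + 1);  // 4 == 3 + 1
        map(2, 3) = 1.0f;          // writes array[3 * 4 + 2]
        assert(array[14] == 1.0f);
        return 0;
    }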
sparse.h
    66   for(Index i=0; i<sparseMat.innerSize(); i++)
    119  for(int i=0; i<sparseMat.innerSize(); i++)
sparse_basic.cpp
  in sparse_basic():
    137  …rXi::Constant(m2.outerSize(), ((mode%2)==0) ? int(m2.innerSize()) : std::max<int>(1,int(m2.innerSi…
    307  std::vector<Scalar> ref_value(m2.innerSize());
    308  std::vector<Index> ref_index(m2.innerSize());
    354  m2.reserve(VectorXi::Constant(m2.outerSize(), int(m2.innerSize())));
/aosp_15_r20/external/armnn/src/backends/reference/workloads/
Softmax.cpp
  in Softmax():
    31   const unsigned int innerSize = armnnUtils::GetNumElementsBetween(inputShape,   (local)
    37   unsigned int inputBeginIdx = outer * axisSize * innerSize;
    38   unsigned int inputEndIdx = inputBeginIdx + axisSize * innerSize;
    39   unsigned int outputBeginIdx = outer * axisSize * innerSize;
    41   …for (unsigned int inner = 0; inner < innerSize; ++inner, ++inputBeginIdx, ++inputEndIdx, ++outputB…
    45   for (unsigned int iter = inputBeginIdx; iter < inputEndIdx; iter += innerSize)
    53   for (unsigned int iter = inputBeginIdx; iter < inputEndIdx; iter += innerSize)
    62   …(unsigned int iter = inputBeginIdx; iter < inputEndIdx; iter += innerSize, outputIter += innerSize)
RefChannelShuffleWorkload.cpp
  in Execute():
    58   …uint32_t innerSize = getNumberOfElements(tensorShape, channelsAxis + 1, tensorShape.GetNumDimensio…
    62   for (uint32_t inner = 0; inner < innerSize; ++inner)
    64   uint32_t decoderStep1 = outer * tensorShape[channelsAxis] * innerSize + inner;
    66   uint32_t encoderStep1 = outer * tensorShape[channelsAxis] * innerSize + inner;
    70   … for (uint32_t j = 0; j < numGroups; j++, encoder += innerSize, encoderStep1 += innerSize)
    72   decoder += innerSize * (i + j * groupSize);
    75   decoder -= innerSize * (i + j * groupSize);
LogSoftmax.cpp
  in LogSoftmax():
    48   const unsigned int innerSize = armnnUtils::GetNumElementsBetween(inputShape,   (local)
    54   for (unsigned int inner = 0; inner < innerSize; ++inner)
    57   input[outer * axisSize * innerSize + inner];
    61   input[(outer * axisSize + i) * innerSize + inner];
    69   input[(outer * axisSize + i) * innerSize + inner];
    79   const unsigned int index = (outer * axisSize + i) * innerSize + inner;
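Both the Arm NN and the NNAPI LogSoftmax above subtract the per-slice maximum before exponentiating so exp() cannot overflow, then compute log_softmax(x_i) = (x_i - max) * beta - log(sum_j exp((x_j - max) * beta)). A compact single-slice sketch of that trick (plain C++, not the Arm NN code; beta is the optional scaling factor the snippets apply):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Numerically stable log-softmax over one axis slice.
    std::vector<float> logSoftmaxSlice(const std::vector<float>& x, float beta = 1.0f) {
        const float maxValue = *std::max_element(x.begin(), x.end());
        float sum = 0.0f;
        for (float v : x) sum += std::exp((v - maxValue) * beta);
        const float logSum = std::log(sum);
        std::vector<float> out(x.size());
        for (size_t i = 0; i < x.size(); ++i)
            out[i] = (x[i] - maxValue) * beta - logSum;
        return out;
    }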
/aosp_15_r20/external/eigen/unsupported/Eigen/src/Skyline/
SkylineMatrix.h
    82   inline Index innerSize() const {
    151  eigen_assert(inner < innerSize());
    200  eigen_assert(inner < innerSize());
    236  eigen_assert(idx < innerSize());
    245  eigen_assert(inner < innerSize());
    269  eigen_assert(inner < innerSize());
    289  eigen_assert(idx < innerSize());
    298  eigen_assert(inner < innerSize());
    317  eigen_assert(inner < innerSize());
    334  eigen_assert(inner < innerSize());
  [all …]
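For orientation: throughout Eigen, innerSize() is the extent along the storage direction (rows of a column-major matrix, columns of a row-major one) and outerSize() is the other extent, which is why the asserts above bound inner indices by innerSize(). A small demonstration using the standard dense API:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
        Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> a(3, 5);
        Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> b(3, 5);
        // Column-major: the inner (contiguous) direction runs down a column.
        std::cout << a.innerSize() << " x " << a.outerSize() << "\n";  // 3 x 5
        // Row-major: the inner direction runs across a row.
        std::cout << b.innerSize() << " x " << b.outerSize() << "\n";  // 5 x 3
        return 0;
    }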
/aosp_15_r20/external/eigen/Eigen/src/Core/
AssignEvaluator.h
    346  for(Index inner = 0; inner < kernel.innerSize(); ++inner) {
    474  const Index innerSize = kernel.innerSize();
    478  for(Index inner = 0; inner < innerSize; inner+=packetSize)
    558  const Index innerSize = kernel.innerSize();
    561  …able) || bool(dstIsAligned)) ? 0 : internal::first_aligned<requestedAlignment>(dst_ptr, innerSize);
    565  const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
    575  for(Index inner = alignedEnd; inner<innerSize ; ++inner)
    578  alignedStart = numext::mini((alignedStart+alignedStep)%packetSize, innerSize);
    592  enum { innerSize = DstXprType::InnerSizeAtCompileTime,
    594  vectorizableSize = (int(innerSize) / int(packetSize)) * int(packetSize),
  [all …]
Redux.h
    203  for(Index i = 1; i < xpr.innerSize(); ++i)
    206  for(Index j = 0; j < xpr.innerSize(); ++j)
    296  const Index innerSize = xpr.innerSize();
    301  const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;
    312  for(Index i=packetedInnerSize; i<innerSize; ++i)
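The packetedInnerSize line above rounds innerSize down to a whole number of SIMD packets so the main loop can consume full packets while a scalar loop handles the remainder (line 312). The shape of that split, sketched with plain accumulation in place of Eigen's packet intrinsics:

    #include <cstddef>

    // Sum n floats using a vectorizable main loop over whole "packets" of
    // kPacketSize elements, then a scalar tail: the same loop split Redux.h
    // performs with real SIMD packets.
    float sumWithTail(const float* data, std::size_t n) {
        constexpr std::size_t kPacketSize = 4;  // stand-in for the SIMD width
        const std::size_t packetedSize = (n / kPacketSize) * kPacketSize;
        float acc[kPacketSize] = {0, 0, 0, 0};
        for (std::size_t i = 0; i < packetedSize; i += kPacketSize)
            for (std::size_t lane = 0; lane < kPacketSize; ++lane)
                acc[lane] += data[i + lane];  // a compiler can vectorize this
        float total = acc[0] + acc[1] + acc[2] + acc[3];
        for (std::size_t i = packetedSize; i < n; ++i)
            total += data[i];  // scalar tail for the leftover elements
        return total;
    }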
CoreIterators.h
  in InnerIterator():
    42   : m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize())
  in inner_iterator_selector():
    87   …NE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &innerSize)   (argument)
    88   : m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize)
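The innerSize passed to the dense inner_iterator_selector above is simply the iteration end: an InnerIterator walks one inner vector (one column of a column-major expression) from 0 to innerSize, or over the structural nonzeros in the sparse case. Standard usage looks like this:

    #include <Eigen/SparseCore>
    #include <iostream>

    int main() {
        Eigen::SparseMatrix<double> mat(4, 3);  // column-major by default
        mat.insert(1, 0) = 2.0;
        mat.insert(3, 0) = 5.0;
        mat.insert(0, 2) = 7.0;
        mat.makeCompressed();
        // Visit the nonzeros of each inner vector (here, each column).
        for (int outer = 0; outer < mat.outerSize(); ++outer)
            for (Eigen::SparseMatrix<double>::InnerIterator it(mat, outer); it; ++it)
                std::cout << "(" << it.row() << "," << it.col() << ") = "
                          << it.value() << "\n";
        return 0;
    }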
Assign_MKL.h
    99   …VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, …
    162  …VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, exponent, …
/aosp_15_r20/external/sdv/vsomeip/third_party/boost/numeric/odeint/include/boost/numeric/odeint/external/eigen/
eigen_resize.hpp
  in same_size():
    63   return ( ( m1.innerSize () == m2.innerSize () ) && ( m1.outerSize() == m2.outerSize() ) );
    73   return ( ( v1.innerSize () == v2.innerSize () ) && ( v1.outerSize() == v2.outerSize() ) );
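odeint calls same_size() to decide whether a state container must be resized before an integration step; comparing innerSize()/outerSize() rather than rows()/cols() lets one overload serve both matrices and vectors. An illustrative free-standing analogue (the name sameSize is made up; for operands with the same storage order this is equivalent to comparing rows() and cols()):

    #include <Eigen/Dense>

    // Two Eigen objects count as "the same size" when both their inner and
    // outer extents match, mirroring the same_size() checks above.
    template <class DerivedA, class DerivedB>
    bool sameSize(const Eigen::EigenBase<DerivedA>& a,
                  const Eigen::EigenBase<DerivedB>& b) {
        return a.derived().innerSize() == b.derived().innerSize() &&
               a.derived().outerSize() == b.derived().outerSize();
    }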
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/cuda/
SparseMatMul.cu
  in Dgemm2():
    440  int innerSize = confirm_mult_size(A.size_, B.size_);   (local)
    449  innerSize,
    477  innerSize,
    505  innerSize,
  in Sgemm2():
    568  int innerSize = confirm_mult_size(A.size_, B.size_);   (local)
    578  innerSize,
    605  innerSize,
    633  innerSize,
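Here innerSize is the contracted dimension of the matrix product: for C = A * B with A of shape m x k and B of shape k x n, it is k, which confirm_mult_size() validates against both operands. A trivial illustrative stand-in (not the PyTorch helper; the name and error handling are assumptions):

    #include <array>
    #include <stdexcept>

    // Returns the shared inner dimension k of an (m x k) * (k x n) product,
    // or throws when the operands disagree on it.
    int confirmMultSize(const std::array<int, 2>& aSize, const std::array<int, 2>& bSize) {
        if (aSize[1] != bSize[0])
            throw std::invalid_argument("matmul: inner dimensions do not match");
        return aSize[1];
    }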
/aosp_15_r20/external/eigen/unsupported/Eigen/src/SparseExtra/
RandomSetter.h
  in RandomSetter():
    206  const Index outerSize = SwapStorage ? target.innerSize() : target.outerSize();
    207  const Index innerSize = SwapStorage ? target.outerSize() : target.innerSize();   (local)
    213  Index aux = innerSize - 1;
DynamicSparseMatrix.h
    84   inline Index innerSize() const { return m_innerSize; }
    213  const Index innerSize = IsRowMajor ? cols : rows;
    214  if (m_innerSize>innerSize)
    234  eigen_assert(innerSize()==0 && outerSize()==0);
/aosp_15_r20/external/eigen/Eigen/src/SparseCore/
SparseView.h
    61   inline Index innerSize() const { return m_matrix.innerSize(); }
    154  : m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize())
ConservativeSparseSparseProduct.h
    25   Index rows = lhs.innerSize();
    27   eigen_assert(lhs.outerSize() == rhs.innerSize());
    275  eigen_assert(lhs.outerSize() == rhs.innerSize());
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
Indexing.cu
  in indexFuncSmallIndex():
    727  IndexType innerSize,   (argument)
    746  linearIndex < innerSize;
  in indexFuncLargeIndex():
    777  IndexType innerSize,   (argument)
    789  srcIndex = linearIndex / innerSize;
    790  elementInSlice = linearIndex % innerSize;
    793  elementInSlice = linearIndex / innerSize;
    794  srcIndex = linearIndex % innerSize;
  in indexSelectSmallIndex():
    1221 IndexType innerSize,   (argument)
    1236 linearIndex < innerSize;
  in indexSelectLargeIndex():
    1265 IndexType innerSize,   (argument)
  [all …]
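Each thread in these kernels splits a flat linearIndex into a slice index and an offset within the slice using one division and one modulo; which component takes the quotient depends on whether the indexed dimension is the outermost or the innermost one (lines 789-794 above). A host-side C++ sketch of the same decomposition (names are illustrative):

    #include <cstdint>

    struct SliceCoord {
        int64_t slice;           // index along the indexed dimension
        int64_t elementInSlice;  // offset within that slice
    };

    // Decompose a flat index the way the Indexing.cu kernels do. When the
    // index dimension is major, consecutive linear indices stay within one
    // slice; otherwise they sweep across slices.
    SliceCoord decompose(int64_t linearIndex, int64_t innerSize, bool indexIsMajor) {
        if (indexIsMajor) {
            return {linearIndex / innerSize, linearIndex % innerSize};
        }
        return {linearIndex % innerSize, linearIndex / innerSize};
    }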