/aosp_15_r20/external/webrtc/rtc_base/numerics/

  divide_round_unittest.cc
     21  static_assert(DivideRoundUp(5, 1) == 5, "");  in TEST()
     22  static_assert(DivideRoundUp(5, 2) == 3, "");  in TEST()
     26  EXPECT_EQ(DivideRoundUp(uint8_t{0}, 1), 0);  in TEST()
     27  EXPECT_EQ(DivideRoundUp(uint8_t{0}, 3), 0);  in TEST()
     28  EXPECT_EQ(DivideRoundUp(int{0}, 1), 0);  in TEST()
     29  EXPECT_EQ(DivideRoundUp(int{0}, 3), 0);  in TEST()
     33  EXPECT_EQ(DivideRoundUp(uint8_t{255}, 2), 128);  in TEST()
     34  EXPECT_EQ(DivideRoundUp(std::numeric_limits<int>::max(), 2),  in TEST()
    127  static_assert(std::is_same<decltype(DivideRoundUp(TypeParam{100}, int8_t{3})),  in TYPED_TEST()
    131  std::is_same<decltype(DivideRoundUp(TypeParam{100}, int16_t{3})),  in TYPED_TEST()
    [all …]
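
Every subtree indexed here re-implements the same contract: integer division of non-negative operands, rounded up. A minimal sketch consistent with the WebRTC assertions above, using the quotient-plus-remainder form (the body is assumed for illustration; the naive (n + divisor - 1) / divisor would overflow on the std::numeric_limits<int>::max() case at line 34):

    #include <cstdint>
    #include <limits>

    // Hypothetical re-implementation; only the call sites above are
    // confirmed by the listing, not this body.
    template <typename T, typename N>
    constexpr T DivideRoundUp(T n, N divisor) {
      return static_cast<T>(n / divisor + (n % divisor != 0 ? 1 : 0));
    }

    static_assert(DivideRoundUp(5, 1) == 5, "");
    static_assert(DivideRoundUp(5, 2) == 3, "");
    static_assert(DivideRoundUp(uint8_t{255}, 2) == 128, "");
    static_assert(DivideRoundUp(std::numeric_limits<int>::max(), 2) ==
                  std::numeric_limits<int>::max() / 2 + 1, "");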

/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/gpu/common/task/

  weights_conversion.h
     41  const int dst_slices = DivideRoundUp(weights.shape.o, 4);  in RearrangeWeightsToOHWIOGroupI4O4()
     42  const int src_slices = DivideRoundUp(weights.shape.i, 4);  in RearrangeWeightsToOHWIOGroupI4O4()
     43  const int dst_groups = DivideRoundUp(dst_slices, out_group_size);  in RearrangeWeightsToOHWIOGroupI4O4()
     77  const int dst_slices = DivideRoundUp(weights.shape.o, 4);  in RearrangeWeightsToODHWIOGroupI4O4()
     78  const int src_slices = DivideRoundUp(weights.shape.i, 4);  in RearrangeWeightsToODHWIOGroupI4O4()
     79  const int dst_groups = DivideRoundUp(dst_slices, out_group_size);  in RearrangeWeightsToODHWIOGroupI4O4()
    115  const int dst_slices = DivideRoundUp(weights.shape.o, 4);  in RearrangeWeightsToOHWIOGroupO4I4()
    116  const int src_slices = DivideRoundUp(weights.shape.i, 4);  in RearrangeWeightsToOHWIOGroupO4I4()
    117  const int dst_groups = DivideRoundUp(dst_slices, out_group_size);  in RearrangeWeightsToOHWIOGroupO4I4()
    151  const int dst_slices = DivideRoundUp(weights.shape.o, 4);  in RearrangeWeightsToODHWIOGroupO4I4()
    [all …]
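
All four rearrange variants above open with the same three lines: output and input channels are packed into slices of 4, and output slices are then grouped by out_group_size, each step rounding up so partial slices and partial groups still get storage. A hedged sketch of just that sizing arithmetic (the OHWI struct is a stand-in for the real weight-shape type; the real functions go on to reorder the weights over these counts):

    // Illustrative sizing only, mirroring weights_conversion.h:41-43.
    struct OHWI { int o, h, w, i; };  // assumed weight-shape layout

    int PaddedBlockCount(const OHWI& shape, int out_group_size) {
      const int dst_slices = DivideRoundUp(shape.o, 4);
      const int src_slices = DivideRoundUp(shape.i, 4);
      const int dst_groups = DivideRoundUp(dst_slices, out_group_size);
      // e.g. shape.o = 10 -> dst_slices = 3; out_group_size = 2 ->
      // dst_groups = 2, so one padding slice fills the last group.
      return dst_groups * out_group_size * src_slices;
    }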

  work_group_picking.cc
    221  int wg_x = std::min(DivideRoundUp(grid.x, 2), wg_xy_size);  in GetWorkGroup()
    256  int planar_work_groups = DivideRoundUp(width * height, 128);  in XY128RequiresMoreWorkGroupsThenXY128Linear()
    260  int x_groups = DivideRoundUp(width, work_group.x);  in XY128RequiresMoreWorkGroupsThenXY128Linear()
    261  int y_groups = DivideRoundUp(height, work_group.y);  in XY128RequiresMoreWorkGroupsThenXY128Linear()

/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/gpu/common/

  util_test.cc
     27  TEST(UtilTest, DivideRoundUp) {  in TEST() argument
     28  EXPECT_THAT(DivideRoundUp(0, 256), Eq(0));  in TEST()
     29  EXPECT_THAT(DivideRoundUp(2u, 256), Eq(1));  in TEST()
     30  EXPECT_THAT(DivideRoundUp(2, 256), Eq(1));  in TEST()
     31  EXPECT_THAT(DivideRoundUp(255u, 256), Eq(1));  in TEST()
     32  EXPECT_THAT(DivideRoundUp(255, 256), Eq(1));  in TEST()
     33  EXPECT_THAT(DivideRoundUp(256u, 256), Eq(1));  in TEST()
     34  EXPECT_THAT(DivideRoundUp(256, 256), Eq(1));  in TEST()
     35  EXPECT_THAT(DivideRoundUp(257u, 256), Eq(2));  in TEST()
     36  EXPECT_THAT(DivideRoundUp(257, 256), Eq(2));  in TEST()

  util.h
     27  T DivideRoundUp(T n, N divisor) {  in DivideRoundUp() function
     34  inline uint3 DivideRoundUp(uint3 n, uint3 divisor) {  in DivideRoundUp() function
     35  return uint3(DivideRoundUp(n.x, divisor.x), DivideRoundUp(n.y, divisor.y),  in DivideRoundUp()
     36  DivideRoundUp(n.z, divisor.z));  in DivideRoundUp()
     43  return DivideRoundUp(number, n) * n;  in AlignByN()
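
This header is the definition site for the TFLite GPU copy: a scalar template, a componentwise uint3 overload built on it, and AlignByN, which rounds a number up to the next multiple of n. A sketch of how the three pieces fit together; only the signatures and the AlignByN body are confirmed by the snippets, the scalar body is assumed:

    #include <cstdint>

    struct uint3 { uint32_t x, y, z; };  // stand-in for tflite::gpu::uint3

    // Scalar ceiling division (body assumed for illustration).
    template <typename T, typename N>
    T DivideRoundUp(T n, N divisor) {
      return static_cast<T>((n + divisor - 1) / divisor);
    }

    // Componentwise overload, as at util.h:34-36.
    inline uint3 DivideRoundUp(uint3 n, uint3 divisor) {
      return uint3{DivideRoundUp(n.x, divisor.x), DivideRoundUp(n.y, divisor.y),
                   DivideRoundUp(n.z, divisor.z)};
    }

    // Rounds number up to a multiple of n, as at util.h:43.
    template <typename T, typename N>
    T AlignByN(T number, N n) {
      return DivideRoundUp(number, n) * n;
    }

AlignByN(10, 4) == 12; this is the padding idiom the converters below rely on when channel counts are not multiples of 4.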

  convert.cc
     57  for (int p = 0; p < DivideRoundUp(shape.o, kPhwo4i4ChannelsInPlane); ++p) {  in ConvertToPHWO4I4()
     60  for (int c = 0; c < DivideRoundUp(shape.i, kPhwo4i4ChannelsInPlane);  in ConvertToPHWO4I4()
    118  DivideRoundUp(shape.o, 4));  in Get3DSizeForPHWO4I4()
    135  const int dst_depth = DivideRoundUp(shape.o, 4);  in ConvertToPHWO4I4()
    136  const int src_depth = DivideRoundUp(shape.i, 4);  in ConvertToPHWO4I4()
    190  int32_t num_planes = DivideRoundUp(output_channels, kPiohw4ChannelsInPlane);  in ConvertToPIOHW4()
    243  int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);  in ConvertToPHWC4()
    292  int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);  in ConvertToPHWC4Half()
    418  int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);  in ConvertFromPHWC4()
    460  int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);  in ConvertFromPHWC4Half()

/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/gpu/common/tasks/

  conv_generic.cc
    182  const int src_slices = DivideRoundUp(attr.weights.shape.i, 4);  in ConvGeneric()
    183  const int dst_slices = DivideRoundUp(attr.weights.shape.o, 4);  in ConvGeneric()
    324  DivideRoundUp(dst_[0]->Width(), conv_params_.block_size.x);  in BindArguments()
    326  DivideRoundUp(dst_[0]->Height(), conv_params_.block_size.y);  in BindArguments()
    328  DivideRoundUp(dst_[0]->Depth(), conv_params_.block_size.z);  in BindArguments()
    339  DivideRoundUp(dst_[0]->Width(), conv_params_.block_size.x);  in GetGridSize()
    341  DivideRoundUp(dst_[0]->Height(), conv_params_.block_size.y);  in GetGridSize()
    343  DivideRoundUp(dst_[0]->Depth(), conv_params_.block_size.z);  in GetGridSize()
    345  DivideRoundUp(dst_[0]->Slices(), conv_params_.block_size.w);  in GetGridSize()
   1138  const int dst_slices = DivideRoundUp(dst_shape.c, 4);  in GetGroupsCount()
    [all …]

  conv_constants.h
     37  const int dst_depth = DivideRoundUp(weights.shape.o, 4);  in RearrangeWeightsForConvConstants()
     38  const int src_depth = DivideRoundUp(weights.shape.i, 4);  in RearrangeWeightsForConvConstants()
     74  const int dst_depth = DivideRoundUp(weights.shape.o, 4);  in RearrangeWeightsForConvConstantsDot()
     75  const int src_depth = DivideRoundUp(weights.shape.i, 4);  in RearrangeWeightsForConvConstantsDot()
    113  const int src_depth = DivideRoundUp(weights.shape.i, 4);  in UploadWeightsForConvConstants()
    114  const int dst_depth = DivideRoundUp(weights.shape.o, 4);  in UploadWeightsForConvConstants()

  conv_metal_simd.cc
    350  int o_slices = DivideRoundUp(o_size, vec_size);  in OIToVecOIOGroupIO()
    351  int i_slices = DivideRoundUp(i_size, vec_size);  in OIToVecOIOGroupIO()
    352  int o_groups = DivideRoundUp(o_slices, o_group_size);  in OIToVecOIOGroupIO()
    420  int x_groups = DivideRoundUp(dst_shape.w * dst_shape.b, group_size.x);  in Get2dGroupsCount()
    421  int y_groups = DivideRoundUp(dst_shape.h, group_size.y);  in Get2dGroupsCount()
    446  DivideRoundUp(dst_[0]->Slices(), params_.slices_per_thread);  in GetGridSize()
    461  DivideRoundUp(dst_shape.w * dst_shape.b * dst_shape.h, 32);  in CreateConvolutionMetalSimd()
    538  const int src_slices = DivideRoundUp(attr.weights.shape.i, 4);  in IsConvolutionMetalSimdSupported()
    539  const int dst_slices = DivideRoundUp(attr.weights.shape.o, 4);  in IsConvolutionMetalSimdSupported()
    552  const double task_size_slices = DivideRoundUp(dst_shape.c, 16);  in IsGoodTaskSizeForAppleConvSimd()

  conv_constants.cc
    117  const int out_z = DivideRoundUp(weights_shape.o, 4);  in GenerateConvolutionConstantCode()
    119  const int src_depth = DivideRoundUp(weights_shape.i, 4);  in GenerateConvolutionConstantCode()
    215  const int src_depth = DivideRoundUp(src_channels, 4);  in IsDotConvBetter()
    216  const int dst_depth = DivideRoundUp(dst_channels, 4);  in IsDotConvBetter()
    241  const int src_depth = DivideRoundUp(w_shape.i, 4);  in IsConvConstantsSupported()
    242  const int dst_depth = DivideRoundUp(w_shape.o, 4);  in IsConvConstantsSupported()
    251  const int flt4_registers = DivideRoundUp(w_shape.o, 4);  in IsConvConstantsSupported()

  winograd.cc
    232  int tiles_x = DivideRoundUp(new_width, 4);  in GetGridSize()
    233  int tiles_y = DivideRoundUp(new_height, 4);  in GetGridSize()
    242  int tiles_x = DivideRoundUp(new_width, 4);  in BindArguments()
    243  int tiles_y = DivideRoundUp(new_height, 4);  in BindArguments()
    494  const int tiles_x = DivideRoundUp(  in BindArguments()
    496  const int tiles_y = DivideRoundUp(  in BindArguments()
    717  const int tiles_x = DivideRoundUp(dst_[0]->Width(), 4);  in BindArguments()
    723  const int tiles_x = DivideRoundUp(dst_[0]->Width(), 4);  in GetGridSize()
    724  const int tiles_y = DivideRoundUp(dst_[0]->Height(), 4);  in GetGridSize()

  fully_connected.h
     95  const int src_depth = DivideRoundUp(src_channels, 4);  in RearrangeFCWeightsToOIO4I4()
     97  const int dst_depth = DivideRoundUp(dst_channels, 4);  in RearrangeFCWeightsToOIO4I4()
    159  const int src_depth = DivideRoundUp(weights.shape.i, 4);  in UploadWeights()
    160  const int dst_depth = DivideRoundUp(weights.shape.o, 4);  in UploadWeights()

  fully_connected.cc
     40  const int src_depth = DivideRoundUp(weights.shape.i, 4);  in RearrangeFCWeightsToOIO4I4()
     41  const int dst_depth = DivideRoundUp(weights.shape.o, 4);  in RearrangeFCWeightsToOIO4I4()
    200  const int src_depth = DivideRoundUp(weights.shape.i, 4);  in UploadQuantizedWeights()
    201  const int dst_depth = DivideRoundUp(weights.shape.o, 4);  in UploadQuantizedWeights()

  conv_weights_converter.cc
    212  const int output_channels_x4_groups = DivideRoundUp(  in BindArguments()
    218  args->SetInt("in_ch_x4_groups", DivideRoundUp(weights_shape.i, 4)));  in BindArguments()
    234  DivideRoundUp(AlignByN(weights_shape.o, 4 * out_group_size), 4);  in GetGridSize()
    235  const int grid_y = DivideRoundUp(weights_shape.i, 4);  in GetGridSize()

  convolution_transposed.cc
     65  const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);  in ConvolutionTransposed()
    110  const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);  in ConvolutionTransposed()
    585  args->SetInt("grid_size_y", DivideRoundUp(aligned_h, block_size_.y)));  in BindArguments()
    594  const int grid_x = DivideRoundUp(aligned_w, block_size_.x) * dst_[0]->Batch();  in GetGridSize()
    595  const int grid_y = DivideRoundUp(aligned_h, block_size_.y) *  in GetGridSize()
    596  DivideRoundUp(aligned_d, block_size_.z);  in GetGridSize()
    597  const int grid_z = DivideRoundUp(dst_[0]->Slices(), block_size_.w);  in GetGridSize()

  depthwise_conv.h
    120  const int dst_depth = DivideRoundUp(dst_channels, 4);  in RearrangeWeightsForDWConv2D()
    149  const int dst_slices = DivideRoundUp(dst_channels, 4);  in UploadWeightsForDWConv2D()
    188  const int dst_slices = DivideRoundUp(dst_channels, 4);  in RearrangeWeightsForDWConv3D()
    220  const int dst_slices = DivideRoundUp(dst_channels, 4);  in UploadWeightsForDWConv3D()

/aosp_15_r20/external/federated-compute/fcp/secagg/shared/

  math_test.cc
     31  EXPECT_THAT(DivideRoundUp(0, 8), Eq(0));  in TEST()
     32  EXPECT_THAT(DivideRoundUp(1, 8), Eq(1));  in TEST()
     33  EXPECT_THAT(DivideRoundUp(8, 8), Eq(1));  in TEST()
     34  EXPECT_THAT(DivideRoundUp(12, 8), Eq(2));  in TEST()
     35  EXPECT_THAT(DivideRoundUp(31, 8), Eq(4));  in TEST()
     36  EXPECT_THAT(DivideRoundUp(32, 8), Eq(4));  in TEST()
     37  EXPECT_THAT(DivideRoundUp(33, 8), Eq(5));  in TEST()
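
The secagg expectations all use divisor 8, which reads as a bits-to-bytes computation: 31 or 32 bits fit in 4 bytes, 33 bits need a 5th. A trivial wrapper makes that intent explicit (BytesForBits is a hypothetical name for illustration, not a function from the file):

    // Bytes needed to carry `bits` bits, matching the expectations
    // above: 31 -> 4, 32 -> 4, 33 -> 5.
    inline int BytesForBits(int bits) { return DivideRoundUp(bits, 8); }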

/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/gpu/gl/kernels/

  conv.cc
     69  {"src_depth", DivideRoundUp(weights.i, 4)},  in GenerateCode()
     85  {"src_depth", DivideRoundUp(weights.i, 4)},  in GenerateCode()
    201  DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)},  in GenerateCode()
    206  MakeReadonlyObject(uint3(4, DivideRoundUp(attr.weights.shape.i, 4),  in GenerateCode()
    207  DivideRoundUp(attr.weights.shape.o, 4)),  in GenerateCode()
    244  auto dst_depth = DivideRoundUp(ctx.output_shapes[0][3], 4);  in GenerateCode()
    285  DivideRoundUp(ctx.output_shapes[0][3], 4)),  in GenerateCode()

  prelu.cc
     62  DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4)),  in GenerateCode()
     93  DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4));  in GenerateCode()
    105  DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4)),  in GenerateCode()

  mul.cc
    116  DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),  in GenerateMultiplyScalarCode()
    133  DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),  in GenerateMultiplyScalarCode()
    141  DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),  in GenerateMultiplyScalarCode()

  add.cc
     70  DivideRoundUp(hwc_tensor->shape.c, 4)),  in GenerateCode()
     78  DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),  in GenerateCode()
    151  DivideRoundUp(ctx.input_shapes[0][3], 4)),  in GenerateCode()

/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/gpu/metal/

  buffer_convert.mm
     27  using ::tflite::gpu::DivideRoundUp;
    115  int slices = DivideRoundUp(shape.c, 4);
    116  int groups_x = DivideRoundUp(shape.w * shape.b, group_size.width);
    117  int groups_y = DivideRoundUp(shape.h, group_size.height);
    118  int groups_z = DivideRoundUp(slices, group_size.depth);
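
The Metal converter uses the helper to size a compute dispatch: channels are packed into 4-wide slices, then each grid extent is divided by the threadgroup size, rounding up so the last partial group is still launched. A hedged sketch of the computation at lines 115-118 (the shape and group-size types are assumed from the snippet):

    // Illustrative threadgroup-count computation, mirroring lines 115-118.
    struct BHWC { int b, h, w, c; };                 // assumed tensor shape
    struct GroupSize { int width, height, depth; };  // assumed threadgroup size
    struct GroupCount { int x, y, z; };

    GroupCount ComputeGroupCount(const BHWC& shape, const GroupSize& group_size) {
      const int slices = DivideRoundUp(shape.c, 4);  // channels packed 4-wide
      return {DivideRoundUp(shape.w * shape.b, group_size.width),
              DivideRoundUp(shape.h, group_size.height),
              DivideRoundUp(slices, group_size.depth)};
    }

Kernels dispatched this way typically bounds-check the thread id against the real extents, which is what makes rounding up rather than truncating safe.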

/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/gpu/gl/

  object.h
    157  DivideRoundUp(static_cast<uint32_t>(data.size()), 4U), data);  in MakeReadonlyObject()
    162  DivideRoundUp(static_cast<uint32_t>(data.size()), 4U), data);  in MakeReadonlyTexture()
    167  DivideRoundUp(static_cast<uint32_t>(data.size()), 4U), data);  in MakeReadonlyBuffer()
    176  size.z = shape.b * DivideRoundUp(shape.c, 4);  in GetPHWC4Size()

/aosp_15_r20/external/mesa3d/src/gfxstream/guest/android/

  GrallocEmulated.cpp
     23  T DivideRoundUp(T n, N divisor) {  in DivideRoundUp() function
     31  return DivideRoundUp(number, n) * n;  in Align()
    360  const uint32_t planeWidth = DivideRoundUp(alignedWidth, planeInfo.horizontalSubsampling);  in lockPlanes()
    361  const uint32_t planeHeight = DivideRoundUp(alignedHeight, planeInfo.verticalSubsampling);  in lockPlanes()
    462  const uint32_t planeWidth = DivideRoundUp(alignedWidth, planeInfo.horizontalSubsampling);  in allocate()
    463  const uint32_t planeHeight = DivideRoundUp(alignedHeight, planeInfo.verticalSubsampling);  in allocate()
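
Mesa's emulated gralloc defines its own copy and applies it to YUV plane sizing: a chroma plane subsampled 2x2 (an NV12-style layout) must still cover odd image dimensions, so the division rounds up rather than truncating. A small worked example under that assumption (the PlaneInfo fields follow the snippet; the values below are illustrative, not taken from the file):

    #include <cstdint>

    struct PlaneInfo {
      uint32_t horizontalSubsampling;
      uint32_t verticalSubsampling;
    };

    // Mirrors GrallocEmulated.cpp:360-361: a 101x61 image with 2x2
    // chroma subsampling yields a 51x31 plane, not 50x30.
    void PlaneDims(uint32_t alignedWidth, uint32_t alignedHeight,
                   const PlaneInfo& planeInfo, uint32_t* w, uint32_t* h) {
      *w = DivideRoundUp(alignedWidth, planeInfo.horizontalSubsampling);
      *h = DivideRoundUp(alignedHeight, planeInfo.verticalSubsampling);
    }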

/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/gpu/common/tasks/special/

  fc_fc_add.h
     74  const int src_depth = DivideRoundUp(src_channels, 4);  in RearrangeFCWeightsToOIO4I4()
     76  const int dst_depth = DivideRoundUp(dst_channels, 4);  in RearrangeFCWeightsToOIO4I4()
    141  const int src_depth = DivideRoundUp(weights.shape.i, 4);  in UploadWeights()
    142  const int dst_depth = DivideRoundUp(weights.shape.o, 4);  in UploadWeights()