/aosp_15_r20/external/pytorch/torch/nn/modules/

  conv.py
    56  out_channels: int
    70  out_channels: int,
    88  if out_channels % groups != 0:
    102  self.out_channels = out_channels
    130  (in_channels, out_channels // groups, *kernel_size), **factory_kwargs))
    133  (out_channels, in_channels // groups, *kernel_size), **factory_kwargs))
    135  self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))
    277  out_channels: int,
    296  in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
    430  out_channels: int,
    [all …]
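The conv.py matches above (lines 88, 130, and 133) capture the two invariants a grouped convolution places on out_channels: it must be divisible by groups, and the weight is laid out as (out_channels, in_channels // groups, *kernel_size) for a regular conv versus (in_channels, out_channels // groups, *kernel_size) for a transposed one. A minimal sketch of the regular case with torch.nn.Conv2d (standard PyTorch behavior, not code from the indexed file):

```python
import torch
import torch.nn as nn

# Grouped conv: both in_channels and out_channels must be divisible by groups.
conv = nn.Conv2d(in_channels=8, out_channels=12, kernel_size=3, groups=4)

# Weight layout for a regular conv: (out_channels, in_channels // groups, kH, kW)
assert conv.weight.shape == (12, 8 // 4, 3, 3)
# Bias (when present) has one entry per output channel.
assert conv.bias.shape == (12,)

x = torch.randn(1, 8, 16, 16)
y = conv(x)
assert y.shape == (1, 12, 14, 14)  # 16 - 3 + 1 = 14 with stride 1, no padding
```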
/aosp_15_r20/external/executorch/backends/arm/test/ops/

  test_conv2d.py
    32  out_channels: Union[List, int, None] = None,
    48  out_channels = [1 * nbr_conv] if out_channels is None else out_channels
    61  if not isinstance(out_channels, List):
    62  out_channels = [out_channels]
    93  out_channels=out_channels[i],
    116  out_channels=3,
    128  out_channels=10,
    139  out_channels=4,
    150  out_channels=1,
    161  out_channels=1,
    [all …]

  test_conv1d.py
    31  out_channels: Union[List, int, None] = None,
    47  out_channels = [1 * nbr_conv] if out_channels is None else out_channels
    60  if not isinstance(out_channels, List):
    61  out_channels = [out_channels]
    90  out_channels=out_channels[i],
    113  out_channels=3,
    124  out_channels=10,
    134  out_channels=4,
    144  out_channels=1,
    154  out_channels=1,
    [all …]

  test_depthwise_conv.py
    33  out_channels=3,
    44  out_channels=12,
    55  out_channels=12,
    67  out_channels=3,
    78  out_channels=3,
    90  out_channels=8,
    102  out_channels=16,
    114  out_channels=8,
    128  out_channels=[8, 24],
    142  out_channels=[8, 24],
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/

  cudnn_deterministic_base.py
    103  depth=3, height=3, width=3, in_channels=3, out_channels=2)
    123  depth=3, height=3, width=3, in_channels=8, out_channels=1)
    138  height=3, width=3, in_channels=8, out_channels=8)
    163  height=7, width=7, in_channels=1, out_channels=3)
    187  out_channels = 1
    191  height=7, width=7, out_channels=out_channels, in_channels=in_channels)
    198  channels=out_channels)
    217  out_channels = 8
    221  height=3, width=3, out_channels=out_channels, in_channels=in_channels)
    228  channels=out_channels)
    [all …]
/aosp_15_r20/external/pytorch/torch/ao/nn/intrinsic/qat/modules/

  conv_fused.py
    47  out_channels, argument
    71  out_channels,
    85  self.bn = _BN_CLASS_MAP[dim](out_channels, eps, momentum, True, True)
    88  self.bias = Parameter(torch.empty(out_channels))
    156  self.out_channels, device=scaled_weight.device, dtype=input.dtype
    201  self.out_channels, device=self.weight.device, dtype=input.dtype
    371  conv.out_channels,
    398  self.out_channels,
    463  out_channels, argument
    488  out_channels,
    [all …]
/aosp_15_r20/external/executorch/backends/cadence/aot/

  ops_registrations.py
    349  out_channels, *kernel_size, _ = weight.shape
    351  out_channels, _, *kernel_size = weight.shape
    362  out_channels,
    371  in_size, out_channels, stride, padding, dilation, kernel_size, channel_last
    397  out_channels, *kernel_size, _ = weight.shape
    399  out_channels, _, *kernel_size = weight.shape
    410  out_channels,
    419  in_size, out_channels, stride, padding, dilation, kernel_size, channel_last
    625  out_channels, *kernel_size, _ = weight.shape
    627  out_channels, _, *kernel_size = weight.shape
    [all …]
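The ops_registrations.py matches read out_channels from the weight in two layouts: channel-last unpacks `out_channels, *kernel_size, _ = weight.shape`, channel-first unpacks `out_channels, _, *kernel_size = weight.shape`, and the result feeds an output-size computation. A hedged sketch of that pattern using the standard convolution output-size formula; the helper name and argument order below are illustrative, not the ones in the file:

```python
from typing import Sequence, Tuple

import torch


def conv_out_shape(
    weight: torch.Tensor,
    in_size: Sequence[int],          # spatial input size, e.g. (H, W)
    stride: Sequence[int],
    padding: Sequence[int],
    dilation: Sequence[int],
    channel_last: bool = False,
) -> Tuple[int, ...]:
    """Hypothetical helper: output channels plus spatial output size of a conv."""
    if channel_last:
        # weight layout: (out_channels, *kernel_size, in_channels // groups)
        out_channels, *kernel_size, _ = weight.shape
    else:
        # weight layout: (out_channels, in_channels // groups, *kernel_size)
        out_channels, _, *kernel_size = weight.shape

    spatial = tuple(
        (i + 2 * p - d * (k - 1) - 1) // s + 1
        for i, k, s, p, d in zip(in_size, kernel_size, stride, padding, dilation)
    )
    return (out_channels, *spatial)


# 16 filters of size 3x3 over a 32x32 input, stride 2, padding 1 -> (16, 16, 16)
w = torch.empty(16, 8, 3, 3)
print(conv_out_shape(w, (32, 32), (2, 2), (1, 1), (1, 1)))
```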
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/modules/

  conv.py
    44  out_channels, argument
    61  out_channels, argument
    79  if out_channels % groups != 0:
    82  self.out_channels = out_channels
    97  weight_shape = [in_channels, out_channels // self.groups]
    99  weight_shape = [out_channels, in_channels // self.groups]
    109  out_channels,
    172  self.out_channels,
    221  self.out_channels = state[1]
    258  mod.out_channels,
    [all …]
/aosp_15_r20/external/libopus/dnn/torch/osce/utils/layers/

  limited_adaptive_conv1d.py
    44  out_channels, argument
    90  self.out_channels = out_channels
    108  self.conv_kernel = norm(nn.Linear(feature_dim, in_channels * out_channels * kernel_size))
    114  self.filter_gain = norm(nn.Linear(feature_dim, out_channels))
    136  … count += 2 * (self.in_channels * self.out_channels * self.kernel_size * (1 + overhead) * rate)
    140  …nt += 2 * (frame_rate * self.feature_dim * self.out_channels) + rate * (1 + overhead) * self.out_c…
    143  count += 3 * overlap * frame_rate * self.out_channels
    173  …ls = self.conv_kernel(features).reshape((batch_size, num_frames, self.out_channels, self.in_channe…
    193  … conv_kernels = conv_kernels * conv_gains.view(batch_size, num_frames, self.out_channels, 1, 1)

  limited_adaptive_comb1d.py
    88  self.out_channels = 1
    159  …ls = self.conv_kernel(features).reshape((batch_size, num_frames, self.out_channels, self.in_channe…
    179  … overlap_mem = torch.zeros((batch_size, self.out_channels, self.overlap_size), device=x.device)
    193  …pe((batch_size * self.out_channels, self.in_channels, self.kernel_size)), groups=batch_size).resha…
    221  … count += 2 * (self.in_channels * self.out_channels * self.kernel_size * (1 + overhead) * rate)
    222  …nt += 2 * (frame_rate * self.feature_dim * self.out_channels) + rate * (1 + overhead) * self.out_c…
    225  …nt += 2 * (frame_rate * self.feature_dim * self.out_channels) + rate * (1 + overhead) * self.out_c…
    228  count += overlap * frame_rate * 3 * self.out_channels
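Both adaptive layers above predict a kernel per frame and per batch item (conv_kernel is a Linear producing in_channels * out_channels * kernel_size values), scale it by a predicted per-output-channel gain, and apply it through a grouped conv1d so each batch item gets its own filter (the `groups=batch_size` reshape visible at comb1d line 193). A simplified sketch of that batched-filter trick, with the frame dimension dropped and shapes assumed for illustration:

```python
import torch
import torch.nn.functional as F

batch, in_ch, out_ch, k, t = 4, 2, 3, 5, 64

x = torch.randn(batch, in_ch, t)
# One kernel and one per-output-channel gain per batch item (normally predicted
# from features by a Linear layer, as in the matches above).
kernels = torch.randn(batch, out_ch, in_ch, k)
gains = torch.rand(batch, out_ch)

# Scale each kernel by its gain, as scale_kernel() / conv_gains do.
kernels = kernels * gains.view(batch, out_ch, 1, 1)

# Grouped-conv trick: fold the batch dimension into the channel dimension so
# that groups=batch applies a different filter to every batch item.
y = F.conv1d(
    x.reshape(1, batch * in_ch, t),
    kernels.reshape(batch * out_ch, in_ch, k),
    groups=batch,
).reshape(batch, out_ch, t - k + 1)

assert y.shape == (batch, out_ch, 60)  # 64 - 5 + 1 = 60
```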
/aosp_15_r20/external/pytorch/test/inductor/

  test_efficient_conv_bn_eval.py
    38  out_channels, argument
    43  self.conv = conv_class(in_channels, out_channels, bias=use_bias, **kwargs).to(
    46  self.bn = bn_class(out_channels).to(device)
    62  out_channels, argument
    67  self.conv1 = conv_class(in_channels, out_channels, bias=use_bias, **kwargs).to(
    70  self.bn1 = bn_class(out_channels).to(device)
    71  self.conv2 = conv_class(out_channels, out_channels, bias=use_bias, **kwargs).to(
    74  self.bn2 = bn_class(out_channels).to(device)
    75  self.conv3 = conv_class(out_channels, out_channels, bias=use_bias, **kwargs).to(
    78  self.bn3 = bn_class(out_channels).to(device)
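test_efficient_conv_bn_eval.py builds the conv followed by BatchNorm(out_channels) pattern in several module variants. The optimization it exercises rests on a standard identity: in eval mode a BatchNorm layer is a per-output-channel affine transform, so it can be folded into the preceding conv's weight and bias. A hedged sketch of that folding (the generic textbook fold, not the inductor pass itself):

```python
import torch
import torch.nn as nn


@torch.no_grad()
def fold_bn_into_conv(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    """Return a conv equivalent to bn(conv(x)) in eval mode (standard BN folding)."""
    fused = nn.Conv2d(
        conv.in_channels, conv.out_channels, conv.kernel_size,
        stride=conv.stride, padding=conv.padding, dilation=conv.dilation,
        groups=conv.groups, bias=True,
    )
    # One scale factor per output channel.
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused.weight.copy_(conv.weight * scale.view(-1, 1, 1, 1))
    bias = conv.bias if conv.bias is not None else torch.zeros(conv.out_channels)
    fused.bias.copy_((bias - bn.running_mean) * scale + bn.bias)
    return fused


conv, bn = nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)
conv.eval()
bn.eval()
x = torch.randn(2, 3, 16, 16)
torch.testing.assert_close(fold_bn_into_conv(conv, bn)(x), bn(conv(x)))
```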
/aosp_15_r20/external/executorch/backends/xnnpack/test/ops/

  conv2d.py
    24  out_channels=1, argument
    46  out_channels=out_channels,
    72  out_channels=3,
    79  out_channels=2,
    228  self._test(Conv2d(groups=2, in_channels=2, out_channels=6))
    232  Conv2d(groups=2, in_channels=2, out_channels=6),
    270  def __init__(self, in_channels: int, out_channels: int, kernel_size):
    274  out_channels=out_channels,
    280  self.native_batchnorm = torch.nn.BatchNorm2d(out_channels)
    294  Conv2dBatchNormHardTanh(in_channels=2, out_channels=1, kernel_size=(2, 2))
    [all …]

  conv1d.py
    29  out_channels = 1
    36  out_channels=out_channels,
    55  out_channels = 2
    61  out_channels=out_channels,
    72  out_channels=out_channels,
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/reference/modules/

  conv.py
    36  float_conv.out_channels,
    58  out_channels: int,
    73  out_channels,
    121  out_channels, argument
    136  out_channels,
    184  out_channels, argument
    199  out_channels,
    254  float_conv.out_channels,
    277  out_channels: int,
    293  out_channels,
    [all …]
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/dynamic/modules/

  conv.py
    57  out_channels: int,
    80  out_channels,
    142  out_channels, argument
    165  out_channels,
    226  out_channels, argument
    248  out_channels,
    316  out_channels, argument
    334  out_channels,
    398  out_channels, argument
    416  out_channels,
    [all …]
/aosp_15_r20/external/libopus/dnn/torch/osce/models/

  fd_discriminator.py
    375  layer.out_channels,
    608  out_channels = self.num_channels
    613  … norm_f(nn.Conv2d(in_channels, out_channels, (3, 3), stride=stride, padding=padding)),
    617  in_channels = out_channels + 2
    618  out_channels = min(2 * out_channels, self.num_channels_max)
    744  out_channels = self.num_channels
    749  …norm_f(nn.Conv2d(in_channels, out_channels, (3, 3), stride=plan[i][0], dilation=plan[i][1], paddin…
    753  in_channels = out_channels + 2
    756  out_channels = min(channel_factor * out_channels, self.num_channels_max)
    804  out_channels = self.num_channels
    [all …]
/aosp_15_r20/external/libopus/dnn/

  nndsp.c
    84  int out_channels, in scale_kernel() argument
    93  for (i_out_channels = 0; i_out_channels < out_channels; i_out_channels++) in scale_kernel()
    143  int out_channels, in adaconv_process_frame() argument
    192  … print_float_vector("adaconv_kernel_raw", kernel_buffer, in_channels * out_channels * kernel_size); in adaconv_process_frame()
    193  print_float_vector("adaconv_gain_raw", gain_buffer, out_channels); in adaconv_process_frame()
    195  transform_gains(gain_buffer, out_channels, filter_gain_a, filter_gain_b); in adaconv_process_frame()
    196  scale_kernel(kernel_buffer, in_channels, out_channels, kernel_size, gain_buffer); in adaconv_process_frame()
    199  print_float_vector("adaconv_kernel", kernel_buffer, in_channels * out_channels * kernel_size); in adaconv_process_frame()
    200  print_float_vector("adaconv_gain", gain_buffer, out_channels); in adaconv_process_frame()
    205  for (i_out_channels = 0; i_out_channels < out_channels; i_out_channels++) in adaconv_process_frame()
    [all …]

  nnet_arch.h
    159  static void conv2d_float(float *out, const float *weights, int in_channels, int out_channels, int k… in conv2d_float() argument
    164  for (i=0;i<out_channels;i++) { in conv2d_float()
    185  static void conv2d_3x3_float(float *out, const float *weights, int in_channels, int out_channels, c… in conv2d_3x3_float() argument
    192  for (i=0;i<out_channels;i++) { in conv2d_3x3_float()
    229  …conv2d_3x3_float(out, conv->float_weights, conv->in_channels, conv->out_channels, in_buf, height, … in RTCD_SUF()
    231  …conv2d_float(out, conv->float_weights, conv->in_channels, conv->out_channels, conv->ktime, conv->k… in RTCD_SUF()
    233  for (i=0;i<conv->out_channels;i++) { in RTCD_SUF()
    238  for (i=0;i<conv->out_channels;i++) { in RTCD_SUF()
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/options/

  conv.h
    31  int64_t out_channels, in ConvNdOptions()
    34  out_channels_(out_channels), in ConvNdOptions()
    43  TORCH_ARG(int64_t, out_channels);
    110  int64_t out_channels, in ConvOptions()
    113  out_channels_(out_channels), in ConvOptions()
    122  TORCH_ARG(int64_t, out_channels);
    264  int64_t out_channels, in ConvTransposeOptions()
    267  out_channels_(out_channels), in ConvTransposeOptions()
    276  TORCH_ARG(int64_t, out_channels);
/aosp_15_r20/external/pytorch/test/

  test_mkldnn_fusion.py
    65  def __init__(self, in_channels, out_channels, bias, **kwargs): argument
    67  self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
    104  def __init__(self, unary_fn, in_channels, out_channels, bias, **kwargs): argument
    106  self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
    133  def __init__(self, m, in_channels, out_channels, bias, **kwargs): argument
    135  self.conv = m(in_channels, out_channels, bias=bias, **kwargs)
    196  def __init__(self, unary_fn, in_channels, out_channels, bias, **kwargs): argument
    199  in_channels, out_channels, bias=bias, **kwargs
    230  … def __init__(self, unary_fn, dim, in_channels, out_channels, dilation, groups, bias, **kwargs): argument
    232  …self.conv = CONV_MODULES[dim](in_channels, out_channels, dilation=dilation, groups=groups, bias=bi…
    [all …]
/aosp_15_r20/external/libaom/av1/encoder/

  cnn.c
    203  channels_per_branch[b] = layer_config->out_channels; in find_cnn_out_channels()
    205  channels_per_branch[b] = layer_config->out_channels; in find_cnn_out_channels()
    215  channels_per_branch[branch] = layer_config->out_channels; in find_cnn_out_channels()
    240  int *out_height, int *out_channels) { in av1_find_cnn_output_size() argument
    287  out_channels[output_num] = channels_per_branch[layer_config->branch]; in av1_find_cnn_output_size()
    360  for (int i = 0; i < layer_config->out_channels; ++i) { in convolve_maxpool_padding_zero()
    369  int off = k * layer_config->out_channels + i; in convolve_maxpool_padding_zero()
    402  for (int i = 0; i < layer_config->out_channels; ++i) { in convolve_maxpool_padding_replicate()
    411  int off = k * layer_config->out_channels + i; in convolve_maxpool_padding_replicate()
    444  for (int i = 0; i < layer_config->out_channels; ++i) { in convolve_maxpool_padding_valid()
    [all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/ao_sparse/quantized/cpu/

  qlinear.cpp
    51  int64_t out_channels = static_cast<int64_t>(packW->R); in apply_impl() local
    74  output_multiplier_float.resize(out_channels, 0.0); in apply_impl()
    75  act_times_w_scale.resize(out_channels, 1.0f); in apply_impl()
    76  for (const auto i : c10::irange(out_channels)) { in apply_impl()
    91  bias.size(0) == out_channels, in apply_impl()
    93  std::to_string(out_channels)); in apply_impl()
    104  out_sizes.back() = out_channels; // NOLINT in apply_impl()
    205  out_channels, in apply_impl()
    210  out_channels); in apply_impl()
/aosp_15_r20/external/pytorch/torch/ao/nn/qat/modules/

  conv.py
    22  out_channels: int,
    40  out_channels,
    80  mod.out_channels,
    101  self.out_channels,
    145  out_channels: int,
    163  out_channels,
    206  out_channels: int,
    224  out_channels,
    270  out_channels: int,
    288  out_channels,
/aosp_15_r20/external/executorch/backends/qualcomm/tests/

  models.py
    220  out_channels=3,
    228  out_channels=2,
    243  in_channels=2, out_channels=2, kernel_size=1, stride=1, padding=1
    273  out_channels = 1
    277  out_channels=out_channels,
    286  self.native_batchnorm = torch.nn.BatchNorm2d(out_channels)
    316  out_channels=2,
    332  out_channels=3,
    339  out_channels=2,
    354  out_channels=3,
    [all …]
/aosp_15_r20/external/executorch/backends/arm/operators/

  op_conv2d.py
    101  out_channels = weight.shape[0]
    104  [out_channels],
    106  [0] * out_channels,
    120  out_channels = weight.shape[0]
    121  if (in_channels == group.number) and (out_channels % in_channels) == 0:
    125  m_length = int(out_channels / in_channels)
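The op_conv2d.py matches show how the Arm backend distinguishes a depthwise convolution: groups equals in_channels and out_channels is a whole multiple of in_channels, the multiple being the channel multiplier (m_length). A small sketch of that check against PyTorch's grouped-conv semantics; the helper below is illustrative, not the backend's code:

```python
import torch.nn as nn


def is_depthwise(conv: nn.Conv2d) -> bool:
    # Depthwise: every input channel is filtered on its own, and each one
    # produces an integer number of output channels (the channel multiplier).
    return (
        conv.groups == conv.in_channels
        and conv.out_channels % conv.in_channels == 0
    )


dw = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, groups=8)
assert is_depthwise(dw)
assert dw.out_channels // dw.in_channels == 2   # channel multiplier
assert dw.weight.shape == (16, 1, 3, 3)         # in_channels // groups == 1

regular = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3)
assert not is_depthwise(regular)
```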