
Searched refs:alpha_val (Results 1 – 25 of 25) sorted by relevance

/aosp_15_r20/external/executorch/kernels/optimized/cpu/
op_sub.cpp
38 run(const Tensor& a, const Tensor& b, CTYPE_IN alpha_val, Tensor& out) { in run()
41 [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { in run()
44 CTYPE_IN value = a_casted - alpha_val * b_casted; in run()
111 CTYPE alpha_val; in opt_sub_out() local
113 ctx, utils::extract_scalar(alpha, &alpha_val), InvalidArgument, ); in opt_sub_out()
120 [alpha_val, scalar_casted](Vec x) { in opt_sub_out()
121 return Vec(scalar_casted) - Vec(alpha_val) * x; in opt_sub_out()
128 [alpha_val, scalar_casted](Vec x) { in opt_sub_out()
129 return x - Vec(alpha_val * scalar_casted); in opt_sub_out()
153 CTYPE alpha_val; in opt_sub_out() local
[all …]
op_add.cpp
37 run(const Tensor& a, const Tensor& b, CTYPE_IN alpha_val, Tensor& out) { in run()
40 [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { in run()
43 CTYPE_IN value = a_casted + alpha_val * b_casted; in run()
96 CTYPE alpha_val; in opt_add_out() local
98 ctx, utils::extract_scalar(alpha, &alpha_val), InvalidArgument, ); in opt_add_out()
104 [alpha_val, b_casted](Vec x) { in opt_add_out()
105 return x + Vec(alpha_val * b_casted); in opt_add_out()
130 CTYPE alpha_val; in opt_add_out() local
132 ctx, utils::extract_scalar(alpha, &alpha_val), InvalidArgument, ); in opt_add_out()
136 [alpha_val](Vec x, Vec y) { return x + Vec(alpha_val) * y; }, in opt_add_out()
[all …]
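All four ExecuTorch add/sub kernels above reduce to the same elementwise contract, out = a ± alpha · b, with alpha first extracted from a Scalar into a concrete CTYPE. A minimal scalar sketch of that contract, with illustrative names rather than the ExecuTorch kernel API:

```cpp
#include <cstddef>

// Elementwise contract shared by the add/sub kernels above:
//   out[i] = a[i] + alpha * b[i]   (sub negates the alpha term).
// Names are illustrative, not the ExecuTorch kernel API.
template <typename T>
void add_alpha(const T* a, const T* b, T alpha, T* out, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i)
    out[i] = a[i] + alpha * b[i];
}

template <typename T>
void sub_alpha(const T* a, const T* b, T alpha, T* out, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i)
    out[i] = a[i] - alpha * b[i];  // matches `a_casted - alpha_val * b_casted`
}
```

The vectorized branches in op_sub.cpp also show why the scalar's position matters algebraically: `Vec(scalar_casted) - Vec(alpha_val) * x` keeps alpha as a broadcast lane, while `x - Vec(alpha_val * scalar_casted)` folds the constant product once outside the loop.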
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
LinearAlgebraKernel.cpp
18 auto alpha_val = alpha.to<scalar_t>(); in addr_kernel() local
27 return alpha_val && vec1_val && vec2_val; in addr_kernel()
35 return (beta_val && self_val) || (alpha_val && vec1_val && vec2_val); in addr_kernel()
47 auto alpha_val = alpha.to<scalar_t>(); in addr_kernel() local
50 auto alpha_vec = Vec(alpha_val); in addr_kernel()
60 return alpha_val * vec1_val * vec2_val; in addr_kernel()
73 return beta_val * self_val + alpha_val * vec1_val * vec2_val; in addr_kernel()
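addr_kernel evaluates out = beta · self + alpha · (vec1 ⊗ vec2); the `&&` / `||` lines are the same formula specialized for bool, where multiply and add degenerate to logical AND and OR. A naive sketch of the numeric case (illustrative only, not ATen's vectorized dispatch):

```cpp
// out = beta * self + alpha * outer(vec1, vec2); illustrative only.
template <typename T>
void addr(const T* self, const T* vec1, const T* vec2,
          T beta, T alpha, T* out, int m, int n) {
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j)
      out[i * n + j] = beta * self[i * n + j] + alpha * vec1[i] * vec2[j];
}
```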
batch_norm_kernel.cpp
749 const opmath_t alpha_val = alpha_data[c]; in batch_norm_cpu_contiguous_impl() local
751 const fVec alpha_fvec(alpha_val); in batch_norm_cpu_contiguous_impl()
764 output_ptr[d] = scalar_t(opmath_t(input_ptr[d]) * alpha_val + beta_val); in batch_norm_cpu_contiguous_impl()
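In the batch-norm path, alpha_data[c] / beta_data[c] are per-channel scale and shift applied as y = x · alpha + beta. The folding that produces them is not visible in the snippet, so the following states the standard inference-mode formula as an assumption:

```cpp
#include <cmath>

// Assumed standard inference-mode folding behind alpha_data[c]/beta_data[c]:
//   y = x * alpha + beta, with
//   alpha = gamma / sqrt(var + eps),  beta = bias - mean * alpha.
float fold_alpha(float gamma, float var, float eps) {
  return gamma / std::sqrt(var + eps);
}
float fold_beta(float bias, float mean, float alpha) {
  return bias - mean * alpha;
}
```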
/aosp_15_r20/external/executorch/backends/cadence/hifi/operators/
op_add.cpp
46 run(const Tensor& a, const Tensor& b, CTYPE_IN alpha_val, Tensor& out) { in run()
49 [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { in run()
52 CTYPE_IN value = a_casted + alpha_val * b_casted; in run()
121 float alpha_val; in add_out() local
122 torch::executor::native::utils::extract_scalar(alpha, &alpha_val); in add_out()
136 if ((out_type != ScalarType::Float) || (alpha_val != 1.0)) in add_out()
186 CTYPE_IN alpha_val; in add_out() local
187 torch::executor::native::utils::extract_scalar(alpha, &alpha_val); in add_out()
195 CTYPE_OUT>::run(a, b, alpha_val, out); in add_out()
op_sub.cpp
46 run(const Tensor& a, const Tensor& b, CTYPE_IN alpha_val, Tensor& out) { in run()
49 [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { in run()
52 CTYPE_IN value = a_casted - alpha_val * b_casted; in run()
116 float alpha_val; in sub_out() local
117 torch::executor::native::utils::extract_scalar(alpha, &alpha_val); in sub_out()
131 if ((out_type != ScalarType::Float) || (alpha_val != 1.0)) in sub_out()
184 CTYPE_IN alpha_val; in sub_out() local
185 torch::executor::native::utils::extract_scalar(alpha, &alpha_val); in sub_out()
192 CTYPE_OUT>::run(a, b, alpha_val, out); in sub_out()
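Both Cadence HiFi kernels gate their DSP fast path on out_type == Float and alpha_val == 1.0, and otherwise fall back to the generic templated run(). A sketch of that dispatch shape (names illustrative):

```cpp
// Dispatch shape mirroring the HiFi kernels' check
// `(out_type != ScalarType::Float) || (alpha_val != 1.0)`; illustrative.
enum class OutType { Float, Other };

void add_out_sketch(OutType out_type, float alpha_val,
                    const float* a, const float* b, float* out, int n) {
  if (out_type != OutType::Float || alpha_val != 1.0f) {
    for (int i = 0; i < n; ++i)        // generic fallback path
      out[i] = a[i] + alpha_val * b[i];
  } else {
    for (int i = 0; i < n; ++i)        // stand-in for the vendor DSP routine
      out[i] = a[i] + b[i];
  }
}
```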
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/random/
random_grad_test.py
101 alpha_val = np.ones([1, 2])
106 {alpha: alpha_val, beta: beta_val, shape: [2, 1]})
107 self.assertAllEqual(grads_alpha_val.shape, alpha_val.shape)
126 alpha_val = np.logspace(-2, 3, dtype=np_dtype)
127 alpha = constant_op.constant(alpha_val)
134 u = special.gammainc(alpha_val, sample_val)
137 alpha_val, dx=delta * alpha_val)
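The gradient being tested is the implicit-reparameterization derivative of a Gamma sample with respect to its shape parameter: hold u = P(alpha, x) fixed, where P is the regularized lower incomplete gamma that special.gammainc computes, and differentiate implicitly. The test approximates the alpha-derivative with a finite difference of step delta · alpha_val; the standard identity, stated here since the surrounding comparison logic is truncated above:

```latex
% Hold u = P(\alpha, x) fixed (P = regularized lower incomplete gamma,
% i.e. special.gammainc above) and differentiate implicitly:
\frac{\partial x}{\partial \alpha}
  = -\,\frac{\partial P(\alpha, x) / \partial \alpha}{p(x;\alpha)},
\qquad
p(x;\alpha) = \frac{x^{\alpha-1} e^{-x}}{\Gamma(\alpha)}.
```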
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
LinearAlgebra.cu
22 auto alpha_val = alpha.to<scalar_t>(); in addr_kernel_cuda() local
31 return alpha_val && vec1_val && vec2_val; in addr_kernel_cuda()
39 return (beta_val && self_val) || (alpha_val && vec1_val && vec2_val); in addr_kernel_cuda()
49 auto alpha_val = alpha.to<scalar_t>(); in addr_kernel_cuda() local
59 return alpha_val * vec1_val * vec2_val; in addr_kernel_cuda()
67 return beta_val * self_val + alpha_val * vec1_val * vec2_val; in addr_kernel_cuda()
Distributions.cu
202 [] GPU_LAMBDA (scalar_t x_val, scalar_t alpha_val, scalar_t total_val) -> scalar_t { in launch_dirichlet_grad_kernel()
203 return dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val); in launch_dirichlet_grad_kernel()
Blas.cpp
464 opmath_t alpha_val = alpha.to<opmath_t>(); in addmm_out_cuda_impl() local
475 alpha_val, in addmm_out_cuda_impl()
557 opmath_t alpha_val = alpha.to<opmath_t>(); in baddbmm_out_cuda_impl() local
569 alpha_val, in baddbmm_out_cuda_impl()
578 alpha_val, in baddbmm_out_cuda_impl()
/aosp_15_r20/external/executorch/backends/cadence/reference/operators/
op_add.cpp
41 CTYPE_IN alpha_val; in add_out() local
42 ET_EXTRACT_SCALAR(alpha, alpha_val); in add_out()
45 [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { in add_out()
48 CTYPE_IN value = a_casted + alpha_val * b_casted; in add_out()
/aosp_15_r20/external/executorch/kernels/portable/cpu/
op_addmm.cpp
60 CTYPE alpha_val = utils::scalar_to<CTYPE>(alpha); in addmm_out() local
77 alpha_val); in addmm_out()
92 [alpha_val, beta_val](const CTYPE val_a, const CTYPE val_b) { in addmm_out()
93 return val_a * alpha_val + val_b * beta_val; in addmm_out()
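The portable addmm applies alpha and beta in a single elementwise combine after the matmul, `val_a * alpha_val + val_b * beta_val`. A naive sketch of the overall semantics, out = alpha · (mat1 @ mat2) + beta · self (which lambda argument binds to which operand is not visible above, so the binding here is an assumption):

```cpp
// out = alpha * (mat1 @ mat2) + beta * self; naive and illustrative.
// Which lambda argument binds to which operand in op_addmm.cpp is not
// visible above, so the binding here is an assumption.
void addmm_sketch(const float* self, const float* mat1, const float* mat2,
                  float alpha, float beta, float* out,
                  int M, int K, int N) {
  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < N; ++j) {
      float acc = 0.0f;
      for (int k = 0; k < K; ++k)
        acc += mat1[i * K + k] * mat2[k * N + j];
      out[i * N + j] = acc * alpha + self[i * N + j] * beta;
    }
  }
}
```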
/aosp_15_r20/external/libxaac/decoder/
ixheaacd_drc_freq_dec.c
1030 WORD32 alpha_val = 0; in ixheaacd_drc_apply() local
1034 alpha_val = (j + num_qmf_sub_sample_by_2); in ixheaacd_drc_apply()
1036 WORD64 temp_drc = (WORD64)alpha_val * drc_freq_fac + in ixheaacd_drc_apply()
1037 (num_qmf_sub_sample - alpha_val) * in ixheaacd_drc_apply()
1062 alpha_val = 1; in ixheaacd_drc_apply()
1067 alpha_val = 0; in ixheaacd_drc_apply()
1075 alpha_val = 1; in ixheaacd_drc_apply()
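Here alpha_val is a ramp index over QMF subsamples, and the WORD64 accumulation is a fixed-point linear blend whose weights, alpha_val and (num_qmf_sub_sample - alpha_val), sum to num_qmf_sub_sample. A sketch of that blend; the second factor is truncated in the listing, so `prev_fac` is an assumed name:

```cpp
#include <cstdint>

// Fixed-point blend of the shape used in ixheaacd_drc_apply(): the weights
// alpha and (total - alpha) sum to `total`, so the caller normalizes the
// 64-bit accumulator by `total` afterwards. `prev_fac` is an assumed name;
// the second factor is truncated in the listing above.
int64_t blend_drc(int32_t alpha, int32_t total,
                  int32_t drc_freq_fac, int32_t prev_fac) {
  return (int64_t)alpha * drc_freq_fac +
         (int64_t)(total - alpha) * prev_fac;
}
```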
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
BinaryOp.cpp
63 float alpha_val = 1.0f; in add_binary_op_node() local
67 alpha_val = graph.extract_scalar<float>(alpha); in add_binary_op_node()
90 graph.create_params_buffer(alpha_val)}, in add_binary_op_node()
Linear.cpp
238 float alpha_val = 1.0f; in add_addmm_node() local
242 alpha_val = graph.extract_scalar<float>(alpha); in add_addmm_node()
248 Params params = {alpha_val, beta_val}; in add_addmm_node()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/
SparseBlasImpl.cpp
260 auto alpha_val = alpha.toComplexDouble(); in _compressed_row_strided_addmm_out() local
265 if (alpha_val != 1.) { in _compressed_row_strided_addmm_out()
286 if (alpha_val != 1.) { in _compressed_row_strided_addmm_out()
293 if (alpha_val != 1.) { in _compressed_row_strided_addmm_out()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
Distributions.cpp
415 cpu_serial_kernel(iter, [](scalar_t x_val, scalar_t alpha_val, scalar_t total_val) -> scalar_t{ in _dirichlet_grad_cpu() argument
416 return dirichlet_grad_one<scalar_t, double>(x_val, alpha_val, total_val); in _dirichlet_grad_cpu()
478 cpu_serial_kernel(iter, [generator](scalar_t alpha_val) -> scalar_t{ in _s_gamma_cpu() argument
490 …ma<scalar_t, double, decltype(uniform_lambda), decltype(normal_lambda)>(alpha_val, standard_unifor… in _s_gamma_cpu()
511 cpu_serial_kernel(iter1, [generator](scalar_t alpha_val) -> double{ in _s_dirichlet_cpu() argument
524 (alpha_val, standard_uniform, standard_normal); in _s_dirichlet_cpu()
/aosp_15_r20/external/pytorch/aten/src/ATen/cuda/
CUDABlas.cpp
1188 at::opmath_type<Dtype> alpha_val, in gemm_and_bias() argument
1285 &alpha_val, in gemm_and_bias()
1333 at::opmath_type<double> alpha_val,
1349 at::opmath_type<float> alpha_val,
1365 at::opmath_type<at::Half> alpha_val,
1381 at::opmath_type<at::BFloat16> alpha_val,
1417 const float alpha_val = 1.0; in scaled_gemm() local
1498 &alpha_val, in scaled_gemm()
1520 &alpha_val, in scaled_gemm()
1598 at::opmath_type<int32_t> alpha_val = 1; in int8_gemm() local
[all …]
CUDABlas.h
100 at::opmath_type<Dtype> alpha_val,
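gemm_and_bias receives alpha as at::opmath_type<Dtype>, i.e. in the compute type rather than the storage type, so Half/BFloat16 GEMMs carry alpha as float, while scaled_gemm and int8_gemm pin it to 1. A stand-in sketch of that promotion idea (this trait is illustrative, not ATen's at::opmath_type):

```cpp
// Stand-in for the "opmath" promotion idea: reduced-precision storage types
// carry scalars such as alpha in a wider compute type. This trait is
// illustrative, not ATen's at::opmath_type.
template <typename T> struct opmath { using type = T; };

struct half_t { unsigned short bits; };          // stand-in for at::Half
template <> struct opmath<half_t> { using type = float; };

template <typename T>
using opmath_t = typename opmath<T>::type;

static_assert(sizeof(opmath_t<half_t>) == sizeof(float),
              "alpha for half GEMMs is held as float");
```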
/aosp_15_r20/external/coreboot/src/soc/qualcomm/common/
clock.c
172 if (cfg->alpha_val) in clock_configure_enable_gpll()
173 write32(cfg->reg_alpha, cfg->alpha_val); in clock_configure_enable_gpll()
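In the Qualcomm clock driver, alpha_val holds the fractional-N part of a PLL multiplier (see the `u32 alpha_val` member at clock_common.h:73 and the GPLL9 value 0x1555 below); the write is skipped when it is zero, i.e. integer mode. A minimal sketch, assuming coreboot's write32(void *, uint32_t) MMIO helper:

```cpp
#include <stdint.h>

void write32(void *addr, uint32_t value);  // coreboot MMIO helper (declared only)

// Shape of the conditional write in clock_configure_enable_gpll():
// program the fractional-N field only when the config carries one.
struct pll_cfg {
  void *reg_alpha;
  uint32_t alpha_val;  // fractional multiplier; 0 = integer mode, skip write
};

static void program_alpha(const struct pll_cfg *cfg) {
  if (cfg->alpha_val)
    write32(cfg->reg_alpha, cfg->alpha_val);
}
```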
/aosp_15_r20/external/tensorflow/tensorflow/core/grappler/optimizers/
remapper.cc
1365 float alpha_val; in FindMulAndMaximum() local
1377 alpha_val = alpha_tensor.flat<float>()(0); in FindMulAndMaximum()
1388 alpha_val = alpha_tensor.flat<float>()(0); in FindMulAndMaximum()
1393 if (alpha_val < 0) { in FindMulAndMaximum()
2699 float alpha_val; in ReplaceMulMaximumWithLeakyRelu() local
2707 alpha_val = alpha_tensor.flat<float>()(0); in ReplaceMulMaximumWithLeakyRelu()
2708 SetAttrValue(alpha_val, &(*attr)["alpha"]); in ReplaceMulMaximumWithLeakyRelu()
2713 alpha_val = alpha_tensor.flat<float>()(0); in ReplaceMulMaximumWithLeakyRelu()
2714 SetAttrValue(alpha_val, &(*attr)["alpha"]); in ReplaceMulMaximumWithLeakyRelu()
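The remapper rewrites a Mul(alpha, x) feeding a Maximum into a fused LeakyRelu node, copying alpha_val into the node's alpha attribute and rejecting negative alpha up front (`alpha_val < 0`). The identity it relies on, max(x, alpha·x) = LeakyRelu(x, alpha), holds for 0 <= alpha <= 1; a minimal sketch:

```cpp
#include <algorithm>

// Identity behind the Mul+Maximum -> LeakyRelu rewrite: for 0 <= alpha <= 1,
//   max(x, alpha * x) == (x > 0 ? x : alpha * x) == LeakyRelu(x, alpha).
// The pass rejects negative alpha up front (`if (alpha_val < 0)`).
float leaky_relu_via_max(float x, float alpha) {
  return std::max(x, alpha * x);
}

float leaky_relu(float x, float alpha) {
  return x > 0.0f ? x : alpha * x;
}
```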
/aosp_15_r20/external/coreboot/src/soc/qualcomm/common/include/soc/
clock_common.h
73 u32 alpha_val; member
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/cuda/
SparseCUDATensorMath.cu
846 scalar_t alpha_val = alpha.to<scalar_t>(); in bmm_out_sparse_cuda()
911 (void*)&alpha_val, in bmm_out_sparse_cuda()
929 (void*)&alpha_val, in bmm_out_sparse_cuda()
/aosp_15_r20/external/coreboot/src/soc/qualcomm/sc7280/
clock.c
320 gpll9_cfg.alpha_val = 0x1555; in clock_configure_sdcc2()
/aosp_15_r20/external/pytorch/torch/testing/_internal/
common_methods_invocations.py
1162 alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6)
1174 kwargs = dict(alpha=alpha_val, beta=beta_val)