#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cpu/Loops.h>
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>

#include <cstring>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#endif

namespace at {
namespace native {

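// Wraps the raw integer data of `self` into a per-tensor affine quantized
// tensor with the given scale and zero_point. The output uses the matching
// quantized dtype (e.g. kChar -> kQInt8) and the same memory format as
// `self`; the underlying bytes are copied verbatim.
//
// Minimal usage sketch via the public op (illustrative only, not from this
// file; assumes an int8 source tensor):
//   Tensor int_repr = at::randint(-128, 127, {4, 4}, at::kChar);
//   Tensor q = at::_make_per_tensor_quantized_tensor(
//       int_repr, /*scale=*/0.1, /*zero_point=*/0);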
Tensor make_per_tensor_quantized_tensor_cpu(
    const Tensor& self,
    double scale,
    int64_t zero_point) {
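  // Allocate the destination quantized tensor with the corresponding QInt
  // dtype, the requested scale/zero_point, and the source's memory format.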
  Tensor dst = at::_empty_affine_quantized(
      self.sizes(),
      self.options().dtype(toQIntType(self.scalar_type())),
      scale,
      zero_point,
      self.suggest_memory_format());
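  // Make the source contiguous in the same memory format so its bytes can be
  // copied in a single memcpy below.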
  Tensor self_contig = self.contiguous(self.suggest_memory_format());
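  // Dispatch over the quantized dtypes; `underlying_t` is the plain integer
  // type backing `scalar_t` (e.g. int8_t for qint8).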
  AT_DISPATCH_QINT_TYPES(
      dst.scalar_type(), "make_per_tensor_quantized_tensor", [&]() {
        underlying_t* self_data = self_contig.data_ptr<underlying_t>();
        underlying_t* dst_data =
            reinterpret_cast<underlying_t*>(dst.data_ptr<scalar_t>());
        if (self.numel() > 0) {
          memcpy(dst_data, self_data, self.nbytes());
        }
      });
  return dst;
}

} // namespace native
} // namespace at