# mypy: allow-untyped-defs
"""Adds docstrings to Tensor functions"""

import torch._C
from torch._C import _add_docstr as add_docstr
from torch._torch_docs import parse_kwargs, reproducibility_notes


def add_docstr_all(method, docstr):
    add_docstr(getattr(torch._C.TensorBase, method), docstr)


common_args = parse_kwargs(
    """
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)

new_common_args = parse_kwargs(
    """
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same :class:`torch.dtype` as this tensor.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, same :class:`torch.device` as this tensor.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, the returned tensor will be allocated in
        pinned memory. Works only for CPU tensors. Default: ``False``.
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.
"""
)
36
37add_docstr_all(
38    "new_tensor",
39    """
40new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
41pin_memory=False) -> Tensor
42"""
43    + r"""
44
45Returns a new Tensor with :attr:`data` as the tensor data.
46By default, the returned Tensor has the same :class:`torch.dtype` and
47:class:`torch.device` as this tensor.
48
49.. warning::
50
51    :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
52    ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
53    or :func:`torch.Tensor.detach`.
54    If you have a numpy array and want to avoid a copy, use
55    :func:`torch.from_numpy`.
56
57.. warning::
58
59    When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
60    and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
61    and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
62    The equivalents using ``clone()`` and ``detach()`` are recommended.
63
64Args:
65    data (array_like): The returned Tensor copies :attr:`data`.
66
67Keyword args:
68    {dtype}
69    {device}
70    {requires_grad}
71    {layout}
72    {pin_memory}
73
74Example::
75
76    >>> tensor = torch.ones((2,), dtype=torch.int8)
77    >>> data = [[0, 1], [2, 3]]
78    >>> tensor.new_tensor(data)
79    tensor([[ 0,  1],
80            [ 2,  3]], dtype=torch.int8)
81
82""".format(**new_common_args),
83)
84
85add_docstr_all(
86    "new_full",
87    """
88new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
89pin_memory=False) -> Tensor
90"""
91    + r"""
92
93Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
94By default, the returned Tensor has the same :class:`torch.dtype` and
95:class:`torch.device` as this tensor.
96
Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    fill_value (scalar): the number to fill the output tensor with.
99
100Keyword args:
101    {dtype}
102    {device}
103    {requires_grad}
104    {layout}
105    {pin_memory}
106
107Example::
108
109    >>> tensor = torch.ones((2,), dtype=torch.float64)
110    >>> tensor.new_full((3, 4), 3.141592)
111    tensor([[ 3.1416,  3.1416,  3.1416,  3.1416],
112            [ 3.1416,  3.1416,  3.1416,  3.1416],
113            [ 3.1416,  3.1416,  3.1416,  3.1416]], dtype=torch.float64)
114
115""".format(**new_common_args),
116)
117
118add_docstr_all(
119    "new_empty",
120    """
121new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
122pin_memory=False) -> Tensor
123"""
124    + r"""
125
126Returns a Tensor of size :attr:`size` filled with uninitialized data.
127By default, the returned Tensor has the same :class:`torch.dtype` and
128:class:`torch.device` as this tensor.
129
130Args:
131    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
132        shape of the output tensor.
133
134Keyword args:
135    {dtype}
136    {device}
137    {requires_grad}
138    {layout}
139    {pin_memory}
140
141Example::
142
143    >>> tensor = torch.ones(())
144    >>> tensor.new_empty((2, 3))
145    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
146            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])
147
148""".format(**new_common_args),
149)
150
151add_docstr_all(
152    "new_empty_strided",
153    """
new_empty_strided(size, stride, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
155pin_memory=False) -> Tensor
156"""
157    + r"""
158
159Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
160uninitialized data. By default, the returned Tensor has the same
161:class:`torch.dtype` and :class:`torch.device` as this tensor.
162
163Args:
164    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    stride (tuple of ints): the strides of the output tensor.
166
167Keyword args:
168    {dtype}
169    {device}
170    {requires_grad}
171    {layout}
172    {pin_memory}
173
174Example::
175
176    >>> tensor = torch.ones(())
177    >>> tensor.new_empty_strided((2, 3), (3, 1))
178    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
179            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])
180
181""".format(**new_common_args),
182)
183
184add_docstr_all(
185    "new_ones",
186    """
187new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
188pin_memory=False) -> Tensor
189"""
190    + r"""
191
192Returns a Tensor of size :attr:`size` filled with ``1``.
193By default, the returned Tensor has the same :class:`torch.dtype` and
194:class:`torch.device` as this tensor.
195
196Args:
197    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
198        shape of the output tensor.
199
200Keyword args:
201    {dtype}
202    {device}
203    {requires_grad}
204    {layout}
205    {pin_memory}
206
207Example::
208
209    >>> tensor = torch.tensor((), dtype=torch.int32)
210    >>> tensor.new_ones((2, 3))
211    tensor([[ 1,  1,  1],
212            [ 1,  1,  1]], dtype=torch.int32)
213
214""".format(**new_common_args),
215)
216
217add_docstr_all(
218    "new_zeros",
219    """
220new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
221pin_memory=False) -> Tensor
222"""
223    + r"""
224
225Returns a Tensor of size :attr:`size` filled with ``0``.
226By default, the returned Tensor has the same :class:`torch.dtype` and
227:class:`torch.device` as this tensor.
228
229Args:
230    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
231        shape of the output tensor.
232
233Keyword args:
234    {dtype}
235    {device}
236    {requires_grad}
237    {layout}
238    {pin_memory}
239
240Example::
241
242    >>> tensor = torch.tensor((), dtype=torch.float64)
243    >>> tensor.new_zeros((2, 3))
244    tensor([[ 0.,  0.,  0.],
245            [ 0.,  0.,  0.]], dtype=torch.float64)
246
247""".format(**new_common_args),
248)
249
250add_docstr_all(
251    "abs",
252    r"""
253abs() -> Tensor
254
255See :func:`torch.abs`
256""",
257)
258
259add_docstr_all(
260    "abs_",
261    r"""
262abs_() -> Tensor
263
264In-place version of :meth:`~Tensor.abs`
265""",
266)
267
268add_docstr_all(
269    "absolute",
270    r"""
271absolute() -> Tensor
272
273Alias for :func:`abs`
274""",
275)
276
277add_docstr_all(
278    "absolute_",
279    r"""
280absolute_() -> Tensor
281
282In-place version of :meth:`~Tensor.absolute`
283Alias for :func:`abs_`
284""",
285)
286
287add_docstr_all(
288    "acos",
289    r"""
290acos() -> Tensor
291
292See :func:`torch.acos`
293""",
294)
295
296add_docstr_all(
297    "acos_",
298    r"""
299acos_() -> Tensor
300
301In-place version of :meth:`~Tensor.acos`
302""",
303)
304
305add_docstr_all(
306    "arccos",
307    r"""
308arccos() -> Tensor
309
310See :func:`torch.arccos`
311""",
312)
313
314add_docstr_all(
315    "arccos_",
316    r"""
317arccos_() -> Tensor
318
319In-place version of :meth:`~Tensor.arccos`
320""",
321)
322
323add_docstr_all(
324    "acosh",
325    r"""
326acosh() -> Tensor
327
328See :func:`torch.acosh`
329""",
330)
331
332add_docstr_all(
333    "acosh_",
334    r"""
335acosh_() -> Tensor
336
337In-place version of :meth:`~Tensor.acosh`
338""",
339)
340
341add_docstr_all(
342    "arccosh",
343    r"""
arccosh() -> Tensor
345
346See :func:`torch.arccosh`
347""",
348)
349
350add_docstr_all(
351    "arccosh_",
352    r"""
arccosh_() -> Tensor
354
355In-place version of :meth:`~Tensor.arccosh`
356""",
357)
358
359add_docstr_all(
360    "add",
361    r"""
362add(other, *, alpha=1) -> Tensor
363
364Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
365and :attr:`other` are specified, each element of :attr:`other` is scaled by
366:attr:`alpha` before being used.
367
368When :attr:`other` is a tensor, the shape of :attr:`other` must be
369:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.
371
372See :func:`torch.add`
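
A short illustrative example (the inputs are chosen so the results are exact)::

    >>> a = torch.tensor([1., 2., 3.])
    >>> a.add(10)
    tensor([11., 12., 13.])
    >>> # each element of ``other`` is scaled by ``alpha`` before being added
    >>> a.add(torch.tensor([1., 1., 1.]), alpha=2)
    tensor([3., 4., 5.])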
373""",
374)
375
376add_docstr_all(
377    "add_",
378    r"""
379add_(other, *, alpha=1) -> Tensor
380
381In-place version of :meth:`~Tensor.add`
382""",
383)
384
385add_docstr_all(
386    "addbmm",
387    r"""
388addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
389
390See :func:`torch.addbmm`
391""",
392)
393
394add_docstr_all(
395    "addbmm_",
396    r"""
397addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
398
399In-place version of :meth:`~Tensor.addbmm`
400""",
401)
402
403add_docstr_all(
404    "addcdiv",
405    r"""
406addcdiv(tensor1, tensor2, *, value=1) -> Tensor
407
408See :func:`torch.addcdiv`
409""",
410)
411
412add_docstr_all(
413    "addcdiv_",
414    r"""
415addcdiv_(tensor1, tensor2, *, value=1) -> Tensor
416
417In-place version of :meth:`~Tensor.addcdiv`
418""",
419)
420
421add_docstr_all(
422    "addcmul",
423    r"""
424addcmul(tensor1, tensor2, *, value=1) -> Tensor
425
426See :func:`torch.addcmul`
427""",
428)
429
430add_docstr_all(
431    "addcmul_",
432    r"""
433addcmul_(tensor1, tensor2, *, value=1) -> Tensor
434
435In-place version of :meth:`~Tensor.addcmul`
436""",
437)
438
439add_docstr_all(
440    "addmm",
441    r"""
442addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
443
444See :func:`torch.addmm`
445""",
446)
447
448add_docstr_all(
449    "addmm_",
450    r"""
451addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor
452
453In-place version of :meth:`~Tensor.addmm`
454""",
455)
456
457add_docstr_all(
458    "addmv",
459    r"""
460addmv(mat, vec, *, beta=1, alpha=1) -> Tensor
461
462See :func:`torch.addmv`
463""",
464)
465
466add_docstr_all(
467    "addmv_",
468    r"""
469addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor
470
471In-place version of :meth:`~Tensor.addmv`
472""",
473)
474
475add_docstr_all(
476    "sspaddmm",
477    r"""
478sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
479
480See :func:`torch.sspaddmm`
481""",
482)
483
484add_docstr_all(
485    "smm",
486    r"""
487smm(mat) -> Tensor
488
489See :func:`torch.smm`
490""",
491)
492
493add_docstr_all(
494    "addr",
495    r"""
496addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor
497
498See :func:`torch.addr`
499""",
500)
501
502add_docstr_all(
503    "addr_",
504    r"""
505addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor
506
507In-place version of :meth:`~Tensor.addr`
508""",
509)
510
511add_docstr_all(
512    "align_as",
513    r"""
514align_as(other) -> Tensor
515
516Permutes the dimensions of the :attr:`self` tensor to match the dimension order
517in the :attr:`other` tensor, adding size-one dims for any new names.
518
519This operation is useful for explicit broadcasting by names (see examples).
520
521All of the dims of :attr:`self` must be named in order to use this method.
522The resulting tensor is a view on the original tensor.
523
524All dimension names of :attr:`self` must be present in ``other.names``.
525:attr:`other` may contain named dimensions that are not in ``self.names``;
526the output tensor has a size-one dimension for each of those new names.
527
528To align a tensor to a specific order, use :meth:`~Tensor.align_to`.
529
530Examples::
531
532    # Example 1: Applying a mask
533    >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
534    >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
535    >>> imgs.masked_fill_(mask.align_as(imgs), 0)
536
537
538    # Example 2: Applying a per-channel-scale
539    >>> def scale_channels(input, scale):
540    >>>    scale = scale.refine_names('C')
541    >>>    return input * scale.align_as(input)
542
543    >>> num_channels = 3
544    >>> scale = torch.randn(num_channels, names=('C',))
545    >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
546    >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
547    >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))
548
549    # scale_channels is agnostic to the dimension order of the input
550    >>> scale_channels(imgs, scale)
551    >>> scale_channels(more_imgs, scale)
552    >>> scale_channels(videos, scale)
553
554.. warning::
555    The named tensor API is experimental and subject to change.
556
557""",
558)
559
560add_docstr_all(
561    "all",
562    r"""
563all(dim=None, keepdim=False) -> Tensor
564
565See :func:`torch.all`
566""",
567)
568
569add_docstr_all(
570    "allclose",
571    r"""
allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool
573
574See :func:`torch.allclose`
575""",
576)
577
578add_docstr_all(
579    "angle",
580    r"""
581angle() -> Tensor
582
583See :func:`torch.angle`
584""",
585)
586
587add_docstr_all(
588    "any",
589    r"""
590any(dim=None, keepdim=False) -> Tensor
591
592See :func:`torch.any`
593""",
594)
595
596add_docstr_all(
597    "apply_",
598    r"""
599apply_(callable) -> Tensor
600
601Applies the function :attr:`callable` to each element in the tensor, replacing
602each element with the value returned by :attr:`callable`.
603
604.. note::
605
606    This function only works with CPU tensors and should not be used in code
607    sections that require high performance.
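
A minimal example on a CPU tensor (the callable receives and returns Python numbers)::

    >>> t = torch.tensor([1., 2., 3.])
    >>> t.apply_(lambda x: x * 2)   # modifies ``t`` in-place and returns it
    tensor([2., 4., 6.])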
608""",
609)
610
611add_docstr_all(
612    "asin",
613    r"""
614asin() -> Tensor
615
616See :func:`torch.asin`
617""",
618)
619
620add_docstr_all(
621    "asin_",
622    r"""
623asin_() -> Tensor
624
625In-place version of :meth:`~Tensor.asin`
626""",
627)
628
629add_docstr_all(
630    "arcsin",
631    r"""
632arcsin() -> Tensor
633
634See :func:`torch.arcsin`
635""",
636)
637
638add_docstr_all(
639    "arcsin_",
640    r"""
641arcsin_() -> Tensor
642
643In-place version of :meth:`~Tensor.arcsin`
644""",
645)
646
647add_docstr_all(
648    "asinh",
649    r"""
650asinh() -> Tensor
651
652See :func:`torch.asinh`
653""",
654)
655
656add_docstr_all(
657    "asinh_",
658    r"""
659asinh_() -> Tensor
660
661In-place version of :meth:`~Tensor.asinh`
662""",
663)
664
665add_docstr_all(
666    "arcsinh",
667    r"""
668arcsinh() -> Tensor
669
670See :func:`torch.arcsinh`
671""",
672)
673
674add_docstr_all(
675    "arcsinh_",
676    r"""
677arcsinh_() -> Tensor
678
679In-place version of :meth:`~Tensor.arcsinh`
680""",
681)
682
683add_docstr_all(
684    "as_strided",
685    r"""
686as_strided(size, stride, storage_offset=None) -> Tensor
687
688See :func:`torch.as_strided`
689""",
690)
691
692add_docstr_all(
693    "as_strided_",
694    r"""
695as_strided_(size, stride, storage_offset=None) -> Tensor
696
697In-place version of :meth:`~Tensor.as_strided`
698""",
699)
700
701add_docstr_all(
702    "atan",
703    r"""
704atan() -> Tensor
705
706See :func:`torch.atan`
707""",
708)
709
710add_docstr_all(
711    "atan_",
712    r"""
713atan_() -> Tensor
714
715In-place version of :meth:`~Tensor.atan`
716""",
717)
718
719add_docstr_all(
720    "arctan",
721    r"""
722arctan() -> Tensor
723
724See :func:`torch.arctan`
725""",
726)
727
728add_docstr_all(
729    "arctan_",
730    r"""
731arctan_() -> Tensor
732
733In-place version of :meth:`~Tensor.arctan`
734""",
735)
736
737add_docstr_all(
738    "atan2",
739    r"""
740atan2(other) -> Tensor
741
742See :func:`torch.atan2`
743""",
744)
745
746add_docstr_all(
747    "atan2_",
748    r"""
749atan2_(other) -> Tensor
750
751In-place version of :meth:`~Tensor.atan2`
752""",
753)
754
755add_docstr_all(
756    "arctan2",
757    r"""
758arctan2(other) -> Tensor
759
760See :func:`torch.arctan2`
761""",
762)
763
764add_docstr_all(
765    "arctan2_",
766    r"""
arctan2_(other) -> Tensor
768
769In-place version of :meth:`~Tensor.arctan2`
770""",
771)
772
773add_docstr_all(
774    "atanh",
775    r"""
776atanh() -> Tensor
777
778See :func:`torch.atanh`
779""",
780)
781
782add_docstr_all(
783    "atanh_",
784    r"""
atanh_() -> Tensor
786
787In-place version of :meth:`~Tensor.atanh`
788""",
789)
790
791add_docstr_all(
792    "arctanh",
793    r"""
794arctanh() -> Tensor
795
796See :func:`torch.arctanh`
797""",
798)
799
800add_docstr_all(
801    "arctanh_",
802    r"""
arctanh_() -> Tensor
804
805In-place version of :meth:`~Tensor.arctanh`
806""",
807)
808
809add_docstr_all(
810    "baddbmm",
811    r"""
812baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
813
814See :func:`torch.baddbmm`
815""",
816)
817
818add_docstr_all(
819    "baddbmm_",
820    r"""
821baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
822
823In-place version of :meth:`~Tensor.baddbmm`
824""",
825)
826
827add_docstr_all(
828    "bernoulli",
829    r"""
830bernoulli(*, generator=None) -> Tensor
831
832Returns a result tensor where each :math:`\texttt{result[i]}` is independently
833sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
834floating point ``dtype``, and the result will have the same ``dtype``.
835
836See :func:`torch.bernoulli`
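
For illustration, probabilities of 0 and 1 give deterministic samples::

    >>> p = torch.tensor([0., 1., 1.])
    >>> p.bernoulli()
    tensor([0., 1., 1.])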
837""",
838)
839
840add_docstr_all(
841    "bernoulli_",
842    r"""
843bernoulli_(p=0.5, *, generator=None) -> Tensor
844
845Fills each location of :attr:`self` with an independent sample from
846:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
847``dtype``.
848
849:attr:`p` should either be a scalar or tensor containing probabilities to be
850used for drawing the binary random number.
851
852If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
853will be set to a value sampled from
854:math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
855floating point ``dtype``.
856
857See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
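
For illustration, ``p=0`` and ``p=1`` give deterministic fills::

    >>> a = torch.empty(3)
    >>> a.bernoulli_(0)   # Bernoulli(0) always yields 0
    tensor([0., 0., 0.])
    >>> a.bernoulli_(1)   # Bernoulli(1) always yields 1
    tensor([1., 1., 1.])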
858""",
859)
860
861add_docstr_all(
862    "bincount",
863    r"""
864bincount(weights=None, minlength=0) -> Tensor
865
866See :func:`torch.bincount`
867""",
868)
869
870add_docstr_all(
871    "bitwise_not",
872    r"""
873bitwise_not() -> Tensor
874
875See :func:`torch.bitwise_not`
876""",
877)
878
879add_docstr_all(
880    "bitwise_not_",
881    r"""
882bitwise_not_() -> Tensor
883
884In-place version of :meth:`~Tensor.bitwise_not`
885""",
886)
887
888add_docstr_all(
889    "bitwise_and",
890    r"""
891bitwise_and() -> Tensor
892
893See :func:`torch.bitwise_and`
894""",
895)
896
897add_docstr_all(
898    "bitwise_and_",
899    r"""
900bitwise_and_() -> Tensor
901
902In-place version of :meth:`~Tensor.bitwise_and`
903""",
904)
905
906add_docstr_all(
907    "bitwise_or",
908    r"""
909bitwise_or() -> Tensor
910
911See :func:`torch.bitwise_or`
912""",
913)
914
915add_docstr_all(
916    "bitwise_or_",
917    r"""
918bitwise_or_() -> Tensor
919
920In-place version of :meth:`~Tensor.bitwise_or`
921""",
922)
923
924add_docstr_all(
925    "bitwise_xor",
926    r"""
927bitwise_xor() -> Tensor
928
929See :func:`torch.bitwise_xor`
930""",
931)
932
933add_docstr_all(
934    "bitwise_xor_",
935    r"""
936bitwise_xor_() -> Tensor
937
938In-place version of :meth:`~Tensor.bitwise_xor`
939""",
940)
941
942add_docstr_all(
943    "bitwise_left_shift",
944    r"""
945bitwise_left_shift(other) -> Tensor
946
947See :func:`torch.bitwise_left_shift`
948""",
949)
950
951add_docstr_all(
952    "bitwise_left_shift_",
953    r"""
954bitwise_left_shift_(other) -> Tensor
955
956In-place version of :meth:`~Tensor.bitwise_left_shift`
957""",
958)
959
960add_docstr_all(
961    "bitwise_right_shift",
962    r"""
963bitwise_right_shift(other) -> Tensor
964
965See :func:`torch.bitwise_right_shift`
966""",
967)
968
969add_docstr_all(
970    "bitwise_right_shift_",
971    r"""
972bitwise_right_shift_(other) -> Tensor
973
974In-place version of :meth:`~Tensor.bitwise_right_shift`
975""",
976)
977
978add_docstr_all(
979    "broadcast_to",
980    r"""
981broadcast_to(shape) -> Tensor
982
983See :func:`torch.broadcast_to`.
984""",
985)
986
987add_docstr_all(
988    "logical_and",
989    r"""
990logical_and() -> Tensor
991
992See :func:`torch.logical_and`
993""",
994)
995
996add_docstr_all(
997    "logical_and_",
998    r"""
999logical_and_() -> Tensor
1000
1001In-place version of :meth:`~Tensor.logical_and`
1002""",
1003)
1004
1005add_docstr_all(
1006    "logical_not",
1007    r"""
1008logical_not() -> Tensor
1009
1010See :func:`torch.logical_not`
1011""",
1012)
1013
1014add_docstr_all(
1015    "logical_not_",
1016    r"""
1017logical_not_() -> Tensor
1018
1019In-place version of :meth:`~Tensor.logical_not`
1020""",
1021)
1022
1023add_docstr_all(
1024    "logical_or",
1025    r"""
1026logical_or() -> Tensor
1027
1028See :func:`torch.logical_or`
1029""",
1030)
1031
1032add_docstr_all(
1033    "logical_or_",
1034    r"""
1035logical_or_() -> Tensor
1036
1037In-place version of :meth:`~Tensor.logical_or`
1038""",
1039)
1040
1041add_docstr_all(
1042    "logical_xor",
1043    r"""
1044logical_xor() -> Tensor
1045
1046See :func:`torch.logical_xor`
1047""",
1048)
1049
1050add_docstr_all(
1051    "logical_xor_",
1052    r"""
1053logical_xor_() -> Tensor
1054
1055In-place version of :meth:`~Tensor.logical_xor`
1056""",
1057)
1058
1059add_docstr_all(
1060    "bmm",
1061    r"""
1062bmm(batch2) -> Tensor
1063
1064See :func:`torch.bmm`
1065""",
1066)
1067
1068add_docstr_all(
1069    "cauchy_",
1070    r"""
1071cauchy_(median=0, sigma=1, *, generator=None) -> Tensor
1072
1073Fills the tensor with numbers drawn from the Cauchy distribution:
1074
1075.. math::
1076
1077    f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}
1078
1079.. note::
  Sigma (:math:`\sigma`) is used to denote the scale parameter of the Cauchy distribution.
1081""",
1082)
1083
1084add_docstr_all(
1085    "ceil",
1086    r"""
1087ceil() -> Tensor
1088
1089See :func:`torch.ceil`
1090""",
1091)
1092
1093add_docstr_all(
1094    "ceil_",
1095    r"""
1096ceil_() -> Tensor
1097
1098In-place version of :meth:`~Tensor.ceil`
1099""",
1100)
1101
1102add_docstr_all(
1103    "cholesky",
1104    r"""
1105cholesky(upper=False) -> Tensor
1106
1107See :func:`torch.cholesky`
1108""",
1109)
1110
1111add_docstr_all(
1112    "cholesky_solve",
1113    r"""
1114cholesky_solve(input2, upper=False) -> Tensor
1115
1116See :func:`torch.cholesky_solve`
1117""",
1118)
1119
1120add_docstr_all(
1121    "cholesky_inverse",
1122    r"""
1123cholesky_inverse(upper=False) -> Tensor
1124
1125See :func:`torch.cholesky_inverse`
1126""",
1127)
1128
1129add_docstr_all(
1130    "clamp",
1131    r"""
1132clamp(min=None, max=None) -> Tensor
1133
1134See :func:`torch.clamp`
1135""",
1136)
1137
1138add_docstr_all(
1139    "clamp_",
1140    r"""
1141clamp_(min=None, max=None) -> Tensor
1142
1143In-place version of :meth:`~Tensor.clamp`
1144""",
1145)
1146
1147add_docstr_all(
1148    "clip",
1149    r"""
1150clip(min=None, max=None) -> Tensor
1151
1152Alias for :meth:`~Tensor.clamp`.
1153""",
1154)
1155
1156add_docstr_all(
1157    "clip_",
1158    r"""
1159clip_(min=None, max=None) -> Tensor
1160
1161Alias for :meth:`~Tensor.clamp_`.
1162""",
1163)
1164
1165add_docstr_all(
1166    "clone",
1167    r"""
1168clone(*, memory_format=torch.preserve_format) -> Tensor
1169
1170See :func:`torch.clone`
1171""".format(**common_args),
1172)
1173
1174add_docstr_all(
1175    "coalesce",
1176    r"""
1177coalesce() -> Tensor
1178
1179Returns a coalesced copy of :attr:`self` if :attr:`self` is an
1180:ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.
1181
1182Returns :attr:`self` if :attr:`self` is a coalesced tensor.
1183
1184.. warning::
1185  Throws an error if :attr:`self` is not a sparse COO tensor.
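
A small sketch with two entries at the same index (their values are summed)::

    >>> i = torch.tensor([[0, 0], [1, 1]])   # both entries sit at position (0, 1)
    >>> v = torch.tensor([1., 2.])
    >>> s = torch.sparse_coo_tensor(i, v, (2, 2))
    >>> s.coalesce().values()
    tensor([3.])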
1186""",
1187)
1188
1189add_docstr_all(
1190    "contiguous",
1191    r"""
1192contiguous(memory_format=torch.contiguous_format) -> Tensor
1193
1194Returns a contiguous in memory tensor containing the same data as :attr:`self` tensor. If
1195:attr:`self` tensor is already in the specified memory format, this function returns the
1196:attr:`self` tensor.
1197
1198Args:
1199    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
1200        returned Tensor. Default: ``torch.contiguous_format``.
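
A short example using a transposed (non-contiguous) view::

    >>> t = torch.arange(6).reshape(2, 3).t()   # transpose produces a non-contiguous view
    >>> t.is_contiguous()
    False
    >>> t.contiguous().is_contiguous()
    True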
1201""",
1202)
1203
1204add_docstr_all(
1205    "copy_",
1206    r"""
1207copy_(src, non_blocking=False) -> Tensor
1208
1209Copies the elements from :attr:`src` into :attr:`self` tensor and returns
1210:attr:`self`.
1211
1212The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
1213with the :attr:`self` tensor. It may be of a different data type or reside on a
1214different device.
1215
1216Args:
1217    src (Tensor): the source tensor to copy from
1218    non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
1219        the copy may occur asynchronously with respect to the host. For other
1220        cases, this argument has no effect.
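
A brief example where :attr:`src` is broadcast over :attr:`self`::

    >>> a = torch.zeros(2, 3)
    >>> b = torch.tensor([1., 2., 3.])
    >>> a.copy_(b)          # ``b`` is broadcast across the rows of ``a``
    tensor([[1., 2., 3.],
            [1., 2., 3.]])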
1221""",
1222)
1223
1224add_docstr_all(
1225    "conj",
1226    r"""
1227conj() -> Tensor
1228
1229See :func:`torch.conj`
1230""",
1231)
1232
1233add_docstr_all(
1234    "conj_physical",
1235    r"""
1236conj_physical() -> Tensor
1237
1238See :func:`torch.conj_physical`
1239""",
1240)
1241
1242add_docstr_all(
1243    "conj_physical_",
1244    r"""
1245conj_physical_() -> Tensor
1246
1247In-place version of :meth:`~Tensor.conj_physical`
1248""",
1249)
1250
1251add_docstr_all(
1252    "resolve_conj",
1253    r"""
1254resolve_conj() -> Tensor
1255
1256See :func:`torch.resolve_conj`
1257""",
1258)
1259
1260add_docstr_all(
1261    "resolve_neg",
1262    r"""
1263resolve_neg() -> Tensor
1264
1265See :func:`torch.resolve_neg`
1266""",
1267)
1268
1269add_docstr_all(
1270    "copysign",
1271    r"""
1272copysign(other) -> Tensor
1273
1274See :func:`torch.copysign`
1275""",
1276)
1277
1278add_docstr_all(
1279    "copysign_",
1280    r"""
1281copysign_(other) -> Tensor
1282
1283In-place version of :meth:`~Tensor.copysign`
1284""",
1285)
1286
1287add_docstr_all(
1288    "cos",
1289    r"""
1290cos() -> Tensor
1291
1292See :func:`torch.cos`
1293""",
1294)
1295
1296add_docstr_all(
1297    "cos_",
1298    r"""
1299cos_() -> Tensor
1300
1301In-place version of :meth:`~Tensor.cos`
1302""",
1303)
1304
1305add_docstr_all(
1306    "cosh",
1307    r"""
1308cosh() -> Tensor
1309
1310See :func:`torch.cosh`
1311""",
1312)
1313
1314add_docstr_all(
1315    "cosh_",
1316    r"""
1317cosh_() -> Tensor
1318
1319In-place version of :meth:`~Tensor.cosh`
1320""",
1321)
1322
1323add_docstr_all(
1324    "cpu",
1325    r"""
1326cpu(memory_format=torch.preserve_format) -> Tensor
1327
1328Returns a copy of this object in CPU memory.
1329
1330If this object is already in CPU memory and on the correct device,
1331then no copy is performed and the original object is returned.
1332
1333Args:
1334    {memory_format}
1335
1336""".format(**common_args),
1337)
1338
1339add_docstr_all(
1340    "count_nonzero",
1341    r"""
1342count_nonzero(dim=None) -> Tensor
1343
1344See :func:`torch.count_nonzero`
1345""",
1346)
1347
1348add_docstr_all(
1349    "cov",
1350    r"""
1351cov(*, correction=1, fweights=None, aweights=None) -> Tensor
1352
1353See :func:`torch.cov`
1354""",
1355)
1356
1357add_docstr_all(
1358    "corrcoef",
1359    r"""
1360corrcoef() -> Tensor
1361
1362See :func:`torch.corrcoef`
1363""",
1364)
1365
1366add_docstr_all(
1367    "cross",
1368    r"""
1369cross(other, dim=None) -> Tensor
1370
1371See :func:`torch.cross`
1372""",
1373)
1374
1375add_docstr_all(
1376    "cuda",
1377    r"""
1378cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
1379
1380Returns a copy of this object in CUDA memory.
1381
1382If this object is already in CUDA memory and on the correct device,
1383then no copy is performed and the original object is returned.
1384
1385Args:
1386    device (:class:`torch.device`): The destination GPU device.
1387        Defaults to the current CUDA device.
1388    non_blocking (bool): If ``True`` and the source is in pinned memory,
1389        the copy will be asynchronous with respect to the host.
1390        Otherwise, the argument has no effect. Default: ``False``.
1391    {memory_format}
1392""".format(**common_args),
1393)
1394
1395add_docstr_all(
1396    "mtia",
1397    r"""
1398mtia(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
1399
1400Returns a copy of this object in MTIA memory.
1401
1402If this object is already in MTIA memory and on the correct device,
1403then no copy is performed and the original object is returned.
1404
1405Args:
1406    device (:class:`torch.device`): The destination MTIA device.
1407        Defaults to the current MTIA device.
1408    non_blocking (bool): If ``True`` and the source is in pinned memory,
1409        the copy will be asynchronous with respect to the host.
1410        Otherwise, the argument has no effect. Default: ``False``.
1411    {memory_format}
1412""".format(**common_args),
1413)
1414
1415add_docstr_all(
1416    "ipu",
1417    r"""
1418ipu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
1419
1420Returns a copy of this object in IPU memory.
1421
1422If this object is already in IPU memory and on the correct device,
1423then no copy is performed and the original object is returned.
1424
1425Args:
1426    device (:class:`torch.device`): The destination IPU device.
1427        Defaults to the current IPU device.
1428    non_blocking (bool): If ``True`` and the source is in pinned memory,
1429        the copy will be asynchronous with respect to the host.
1430        Otherwise, the argument has no effect. Default: ``False``.
1431    {memory_format}
1432""".format(**common_args),
1433)
1434
1435add_docstr_all(
1436    "xpu",
1437    r"""
1438xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
1439
1440Returns a copy of this object in XPU memory.
1441
1442If this object is already in XPU memory and on the correct device,
1443then no copy is performed and the original object is returned.
1444
1445Args:
1446    device (:class:`torch.device`): The destination XPU device.
1447        Defaults to the current XPU device.
1448    non_blocking (bool): If ``True`` and the source is in pinned memory,
1449        the copy will be asynchronous with respect to the host.
1450        Otherwise, the argument has no effect. Default: ``False``.
1451    {memory_format}
1452""".format(**common_args),
1453)
1454
1455add_docstr_all(
1456    "logcumsumexp",
1457    r"""
1458logcumsumexp(dim) -> Tensor
1459
1460See :func:`torch.logcumsumexp`
1461""",
1462)
1463
1464add_docstr_all(
1465    "cummax",
1466    r"""
1467cummax(dim) -> (Tensor, Tensor)
1468
1469See :func:`torch.cummax`
1470""",
1471)
1472
1473add_docstr_all(
1474    "cummin",
1475    r"""
1476cummin(dim) -> (Tensor, Tensor)
1477
1478See :func:`torch.cummin`
1479""",
1480)
1481
1482add_docstr_all(
1483    "cumprod",
1484    r"""
1485cumprod(dim, dtype=None) -> Tensor
1486
1487See :func:`torch.cumprod`
1488""",
1489)
1490
1491add_docstr_all(
1492    "cumprod_",
1493    r"""
1494cumprod_(dim, dtype=None) -> Tensor
1495
1496In-place version of :meth:`~Tensor.cumprod`
1497""",
1498)
1499
1500add_docstr_all(
1501    "cumsum",
1502    r"""
1503cumsum(dim, dtype=None) -> Tensor
1504
1505See :func:`torch.cumsum`
1506""",
1507)
1508
1509add_docstr_all(
1510    "cumsum_",
1511    r"""
1512cumsum_(dim, dtype=None) -> Tensor
1513
1514In-place version of :meth:`~Tensor.cumsum`
1515""",
1516)
1517
1518add_docstr_all(
1519    "data_ptr",
1520    r"""
1521data_ptr() -> int
1522
1523Returns the address of the first element of :attr:`self` tensor.
1524""",
1525)
1526
1527add_docstr_all(
1528    "dequantize",
1529    r"""
1530dequantize() -> Tensor
1531
1532Given a quantized Tensor, dequantize it and return the dequantized float Tensor.
1533""",
1534)
1535
1536add_docstr_all(
1537    "dense_dim",
1538    r"""
1539dense_dim() -> int
1540
1541Return the number of dense dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
1542
1543.. note::
1544  Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor.
1545
1546See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
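
A small hybrid-tensor sketch (one sparse and one dense dimension)::

    >>> i = torch.tensor([[0, 1]])       # indices for the single sparse dimension
    >>> v = torch.ones(2, 3)             # each sparse index holds a dense row of size 3
    >>> s = torch.sparse_coo_tensor(i, v, (2, 3))
    >>> s.dense_dim()
    1
    >>> s.sparse_dim()
    1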
1547""",
1548)
1549
1550add_docstr_all(
1551    "diag",
1552    r"""
1553diag(diagonal=0) -> Tensor
1554
1555See :func:`torch.diag`
1556""",
1557)
1558
1559add_docstr_all(
1560    "diag_embed",
1561    r"""
1562diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor
1563
1564See :func:`torch.diag_embed`
1565""",
1566)
1567
1568add_docstr_all(
1569    "diagflat",
1570    r"""
1571diagflat(offset=0) -> Tensor
1572
1573See :func:`torch.diagflat`
1574""",
1575)
1576
1577add_docstr_all(
1578    "diagonal",
1579    r"""
1580diagonal(offset=0, dim1=0, dim2=1) -> Tensor
1581
1582See :func:`torch.diagonal`
1583""",
1584)
1585
1586add_docstr_all(
1587    "diagonal_scatter",
1588    r"""
1589diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor
1590
1591See :func:`torch.diagonal_scatter`
1592""",
1593)
1594
1595add_docstr_all(
1596    "as_strided_scatter",
1597    r"""
1598as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor
1599
1600See :func:`torch.as_strided_scatter`
1601""",
1602)
1603
1604add_docstr_all(
1605    "fill_diagonal_",
1606    r"""
1607fill_diagonal_(fill_value, wrap=False) -> Tensor
1608
Fills the main diagonal of a tensor that has at least 2 dimensions.
When dims > 2, all dimensions of the input must be of equal length.
This function modifies the input tensor in-place and returns it.
1612
1613Arguments:
1614    fill_value (Scalar): the fill value
    wrap (bool): whether the diagonal is 'wrapped' after N columns for tall matrices.
1616
1617Example::
1618
1619    >>> a = torch.zeros(3, 3)
1620    >>> a.fill_diagonal_(5)
1621    tensor([[5., 0., 0.],
1622            [0., 5., 0.],
1623            [0., 0., 5.]])
1624    >>> b = torch.zeros(7, 3)
1625    >>> b.fill_diagonal_(5)
1626    tensor([[5., 0., 0.],
1627            [0., 5., 0.],
1628            [0., 0., 5.],
1629            [0., 0., 0.],
1630            [0., 0., 0.],
1631            [0., 0., 0.],
1632            [0., 0., 0.]])
1633    >>> c = torch.zeros(7, 3)
1634    >>> c.fill_diagonal_(5, wrap=True)
1635    tensor([[5., 0., 0.],
1636            [0., 5., 0.],
1637            [0., 0., 5.],
1638            [0., 0., 0.],
1639            [5., 0., 0.],
1640            [0., 5., 0.],
1641            [0., 0., 5.]])
1642
1643""",
1644)
1645
1646add_docstr_all(
1647    "floor_divide",
1648    r"""
1649floor_divide(value) -> Tensor
1650
1651See :func:`torch.floor_divide`
1652""",
1653)
1654
1655add_docstr_all(
1656    "floor_divide_",
1657    r"""
1658floor_divide_(value) -> Tensor
1659
1660In-place version of :meth:`~Tensor.floor_divide`
1661""",
1662)
1663
1664add_docstr_all(
1665    "diff",
1666    r"""
1667diff(n=1, dim=-1, prepend=None, append=None) -> Tensor
1668
1669See :func:`torch.diff`
1670""",
1671)
1672
1673add_docstr_all(
1674    "digamma",
1675    r"""
1676digamma() -> Tensor
1677
1678See :func:`torch.digamma`
1679""",
1680)
1681
1682add_docstr_all(
1683    "digamma_",
1684    r"""
1685digamma_() -> Tensor
1686
1687In-place version of :meth:`~Tensor.digamma`
1688""",
1689)
1690
1691add_docstr_all(
1692    "dim",
1693    r"""
1694dim() -> int
1695
1696Returns the number of dimensions of :attr:`self` tensor.
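
A trivial example::

    >>> torch.zeros(2, 3, 4).dim()
    3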
1697""",
1698)
1699
1700add_docstr_all(
1701    "dist",
1702    r"""
1703dist(other, p=2) -> Tensor
1704
1705See :func:`torch.dist`
1706""",
1707)
1708
1709add_docstr_all(
1710    "div",
1711    r"""
1712div(value, *, rounding_mode=None) -> Tensor
1713
1714See :func:`torch.div`
1715""",
1716)
1717
1718add_docstr_all(
1719    "div_",
1720    r"""
1721div_(value, *, rounding_mode=None) -> Tensor
1722
1723In-place version of :meth:`~Tensor.div`
1724""",
1725)
1726
1727add_docstr_all(
1728    "divide",
1729    r"""
1730divide(value, *, rounding_mode=None) -> Tensor
1731
1732See :func:`torch.divide`
1733""",
1734)
1735
1736add_docstr_all(
1737    "divide_",
1738    r"""
1739divide_(value, *, rounding_mode=None) -> Tensor
1740
1741In-place version of :meth:`~Tensor.divide`
1742""",
1743)
1744
1745add_docstr_all(
1746    "dot",
1747    r"""
1748dot(other) -> Tensor
1749
1750See :func:`torch.dot`
1751""",
1752)
1753
1754add_docstr_all(
1755    "element_size",
1756    r"""
1757element_size() -> int
1758
1759Returns the size in bytes of an individual element.
1760
1761Example::
1762
1763    >>> torch.tensor([]).element_size()
1764    4
1765    >>> torch.tensor([], dtype=torch.uint8).element_size()
1766    1
1767
1768""",
1769)
1770
1771add_docstr_all(
1772    "eq",
1773    r"""
1774eq(other) -> Tensor
1775
1776See :func:`torch.eq`
1777""",
1778)
1779
1780add_docstr_all(
1781    "eq_",
1782    r"""
1783eq_(other) -> Tensor
1784
1785In-place version of :meth:`~Tensor.eq`
1786""",
1787)
1788
1789add_docstr_all(
1790    "equal",
1791    r"""
1792equal(other) -> bool
1793
1794See :func:`torch.equal`
1795""",
1796)
1797
1798add_docstr_all(
1799    "erf",
1800    r"""
1801erf() -> Tensor
1802
1803See :func:`torch.erf`
1804""",
1805)
1806
1807add_docstr_all(
1808    "erf_",
1809    r"""
1810erf_() -> Tensor
1811
1812In-place version of :meth:`~Tensor.erf`
1813""",
1814)
1815
1816add_docstr_all(
1817    "erfc",
1818    r"""
1819erfc() -> Tensor
1820
1821See :func:`torch.erfc`
1822""",
1823)
1824
1825add_docstr_all(
1826    "erfc_",
1827    r"""
1828erfc_() -> Tensor
1829
1830In-place version of :meth:`~Tensor.erfc`
1831""",
1832)
1833
1834add_docstr_all(
1835    "erfinv",
1836    r"""
1837erfinv() -> Tensor
1838
1839See :func:`torch.erfinv`
1840""",
1841)
1842
1843add_docstr_all(
1844    "erfinv_",
1845    r"""
1846erfinv_() -> Tensor
1847
1848In-place version of :meth:`~Tensor.erfinv`
1849""",
1850)
1851
1852add_docstr_all(
1853    "exp",
1854    r"""
1855exp() -> Tensor
1856
1857See :func:`torch.exp`
1858""",
1859)
1860
1861add_docstr_all(
1862    "exp_",
1863    r"""
1864exp_() -> Tensor
1865
1866In-place version of :meth:`~Tensor.exp`
1867""",
1868)
1869
1870add_docstr_all(
1871    "exp2",
1872    r"""
1873exp2() -> Tensor
1874
1875See :func:`torch.exp2`
1876""",
1877)
1878
1879add_docstr_all(
1880    "exp2_",
1881    r"""
1882exp2_() -> Tensor
1883
1884In-place version of :meth:`~Tensor.exp2`
1885""",
1886)
1887
1888add_docstr_all(
1889    "expm1",
1890    r"""
1891expm1() -> Tensor
1892
1893See :func:`torch.expm1`
1894""",
1895)
1896
1897add_docstr_all(
1898    "expm1_",
1899    r"""
1900expm1_() -> Tensor
1901
1902In-place version of :meth:`~Tensor.expm1`
1903""",
1904)
1905
1906add_docstr_all(
1907    "exponential_",
1908    r"""
1909exponential_(lambd=1, *, generator=None) -> Tensor
1910
1911Fills :attr:`self` tensor with elements drawn from the PDF (probability density function):
1912
1913.. math::
1914
1915    f(x) = \lambda e^{-\lambda x}, x > 0
1916
1917.. note::
  In probability theory, the exponential distribution is supported on the interval :math:`[0, \infty)` (i.e., :math:`x \ge 0`),
  implying that zero can be sampled from the exponential distribution.
  However, :func:`torch.Tensor.exponential_` does not sample zero,
  which means that its actual support is the interval :math:`(0, \infty)`.

  Note that :func:`torch.distributions.exponential.Exponential` is supported on the interval :math:`[0, \infty)` and can sample zero.
1924""",
1925)
1926
1927add_docstr_all(
1928    "fill_",
1929    r"""
1930fill_(value) -> Tensor
1931
1932Fills :attr:`self` tensor with the specified value.
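
A short example (``fill_`` returns :attr:`self`)::

    >>> torch.empty(2, 2).fill_(3)
    tensor([[3., 3.],
            [3., 3.]])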
1933""",
1934)
1935
1936add_docstr_all(
1937    "floor",
1938    r"""
1939floor() -> Tensor
1940
1941See :func:`torch.floor`
1942""",
1943)
1944
1945add_docstr_all(
1946    "flip",
1947    r"""
1948flip(dims) -> Tensor
1949
1950See :func:`torch.flip`
1951""",
1952)
1953
1954add_docstr_all(
1955    "fliplr",
1956    r"""
1957fliplr() -> Tensor
1958
1959See :func:`torch.fliplr`
1960""",
1961)
1962
1963add_docstr_all(
1964    "flipud",
1965    r"""
1966flipud() -> Tensor
1967
1968See :func:`torch.flipud`
1969""",
1970)
1971
1972add_docstr_all(
1973    "roll",
1974    r"""
1975roll(shifts, dims) -> Tensor
1976
1977See :func:`torch.roll`
1978""",
1979)
1980
1981add_docstr_all(
1982    "floor_",
1983    r"""
1984floor_() -> Tensor
1985
1986In-place version of :meth:`~Tensor.floor`
1987""",
1988)
1989
1990add_docstr_all(
1991    "fmod",
1992    r"""
1993fmod(divisor) -> Tensor
1994
1995See :func:`torch.fmod`
1996""",
1997)
1998
1999add_docstr_all(
2000    "fmod_",
2001    r"""
2002fmod_(divisor) -> Tensor
2003
2004In-place version of :meth:`~Tensor.fmod`
2005""",
2006)
2007
2008add_docstr_all(
2009    "frac",
2010    r"""
2011frac() -> Tensor
2012
2013See :func:`torch.frac`
2014""",
2015)
2016
2017add_docstr_all(
2018    "frac_",
2019    r"""
2020frac_() -> Tensor
2021
2022In-place version of :meth:`~Tensor.frac`
2023""",
2024)
2025
2026add_docstr_all(
2027    "frexp",
2028    r"""
frexp() -> (Tensor mantissa, Tensor exponent)
2030
2031See :func:`torch.frexp`
2032""",
2033)
2034
2035add_docstr_all(
2036    "flatten",
2037    r"""
2038flatten(start_dim=0, end_dim=-1) -> Tensor
2039
2040See :func:`torch.flatten`
2041""",
2042)
2043
2044add_docstr_all(
2045    "gather",
2046    r"""
2047gather(dim, index) -> Tensor
2048
2049See :func:`torch.gather`
2050""",
2051)
2052
2053add_docstr_all(
2054    "gcd",
2055    r"""
2056gcd(other) -> Tensor
2057
2058See :func:`torch.gcd`
2059""",
2060)
2061
2062add_docstr_all(
2063    "gcd_",
2064    r"""
2065gcd_(other) -> Tensor
2066
2067In-place version of :meth:`~Tensor.gcd`
2068""",
2069)
2070
2071add_docstr_all(
2072    "ge",
2073    r"""
2074ge(other) -> Tensor
2075
2076See :func:`torch.ge`.
2077""",
2078)
2079
2080add_docstr_all(
2081    "ge_",
2082    r"""
2083ge_(other) -> Tensor
2084
2085In-place version of :meth:`~Tensor.ge`.
2086""",
2087)
2088
2089add_docstr_all(
2090    "greater_equal",
2091    r"""
2092greater_equal(other) -> Tensor
2093
2094See :func:`torch.greater_equal`.
2095""",
2096)
2097
2098add_docstr_all(
2099    "greater_equal_",
2100    r"""
2101greater_equal_(other) -> Tensor
2102
2103In-place version of :meth:`~Tensor.greater_equal`.
2104""",
2105)
2106
2107add_docstr_all(
2108    "geometric_",
2109    r"""
2110geometric_(p, *, generator=None) -> Tensor
2111
2112Fills :attr:`self` tensor with elements drawn from the geometric distribution:
2113
2114.. math::
2115
2116    P(X=k) = (1 - p)^{k - 1} p, k = 1, 2, ...
2117
2118.. note::
  For :func:`torch.Tensor.geometric_`, the :math:`k`-th trial is the first success, so it draws samples in :math:`\{1, 2, \ldots\}`, whereas
  for :func:`torch.distributions.geometric.Geometric`, the :math:`(k+1)`-th trial is the first success,
  so it draws samples in :math:`\{0, 1, \ldots\}`.
2122""",
2123)
2124
2125add_docstr_all(
2126    "geqrf",
2127    r"""
2128geqrf() -> (Tensor, Tensor)
2129
2130See :func:`torch.geqrf`
2131""",
2132)
2133
2134add_docstr_all(
2135    "ger",
2136    r"""
2137ger(vec2) -> Tensor
2138
2139See :func:`torch.ger`
2140""",
2141)
2142
2143add_docstr_all(
2144    "inner",
2145    r"""
2146inner(other) -> Tensor
2147
2148See :func:`torch.inner`.
2149""",
2150)
2151
2152add_docstr_all(
2153    "outer",
2154    r"""
2155outer(vec2) -> Tensor
2156
2157See :func:`torch.outer`.
2158""",
2159)
2160
2161add_docstr_all(
2162    "hypot",
2163    r"""
2164hypot(other) -> Tensor
2165
2166See :func:`torch.hypot`
2167""",
2168)
2169
2170add_docstr_all(
2171    "hypot_",
2172    r"""
2173hypot_(other) -> Tensor
2174
2175In-place version of :meth:`~Tensor.hypot`
2176""",
2177)
2178
2179add_docstr_all(
2180    "i0",
2181    r"""
2182i0() -> Tensor
2183
2184See :func:`torch.i0`
2185""",
2186)
2187
2188add_docstr_all(
2189    "i0_",
2190    r"""
2191i0_() -> Tensor
2192
2193In-place version of :meth:`~Tensor.i0`
2194""",
2195)
2196
2197add_docstr_all(
2198    "igamma",
2199    r"""
2200igamma(other) -> Tensor
2201
2202See :func:`torch.igamma`
2203""",
2204)
2205
2206add_docstr_all(
2207    "igamma_",
2208    r"""
2209igamma_(other) -> Tensor
2210
2211In-place version of :meth:`~Tensor.igamma`
2212""",
2213)
2214
2215add_docstr_all(
2216    "igammac",
2217    r"""
2218igammac(other) -> Tensor

See :func:`torch.igammac`
2220""",
2221)
2222
2223add_docstr_all(
2224    "igammac_",
2225    r"""
2226igammac_(other) -> Tensor

In-place version of :meth:`~Tensor.igammac`
2228""",
2229)
2230
2231add_docstr_all(
2232    "indices",
2233    r"""
2234indices() -> Tensor
2235
2236Return the indices tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
2237
2238.. warning::
2239  Throws an error if :attr:`self` is not a sparse COO tensor.
2240
2241See also :meth:`Tensor.values`.
2242
2243.. note::
2244  This method can only be called on a coalesced sparse tensor. See
2245  :meth:`Tensor.coalesce` for details.
2246""",
2247)
2248
2249add_docstr_all(
2250    "get_device",
2251    r"""
2252get_device() -> Device ordinal (Integer)
2253
2254For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
2255For CPU tensors, this function returns `-1`.
2256
2257Example::
2258
2259    >>> x = torch.randn(3, 4, 5, device='cuda:0')
2260    >>> x.get_device()
2261    0
2262    >>> x.cpu().get_device()
2263    -1
2264""",
2265)
2266
2267add_docstr_all(
2268    "values",
2269    r"""
2270values() -> Tensor
2271
2272Return the values tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
2273
2274.. warning::
2275  Throws an error if :attr:`self` is not a sparse COO tensor.
2276
2277See also :meth:`Tensor.indices`.
2278
2279.. note::
2280  This method can only be called on a coalesced sparse tensor. See
2281  :meth:`Tensor.coalesce` for details.
2282""",
2283)
2284
2285add_docstr_all(
2286    "gt",
2287    r"""
2288gt(other) -> Tensor
2289
2290See :func:`torch.gt`.
2291""",
2292)
2293
2294add_docstr_all(
2295    "gt_",
2296    r"""
2297gt_(other) -> Tensor
2298
2299In-place version of :meth:`~Tensor.gt`.
2300""",
2301)
2302
2303add_docstr_all(
2304    "greater",
2305    r"""
2306greater(other) -> Tensor
2307
2308See :func:`torch.greater`.
2309""",
2310)
2311
2312add_docstr_all(
2313    "greater_",
2314    r"""
2315greater_(other) -> Tensor
2316
2317In-place version of :meth:`~Tensor.greater`.
2318""",
2319)
2320
2321add_docstr_all(
2322    "has_names",
2323    r"""
2324Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.
2325""",
2326)
2327
2328add_docstr_all(
2329    "hardshrink",
2330    r"""
2331hardshrink(lambd=0.5) -> Tensor
2332
2333See :func:`torch.nn.functional.hardshrink`
2334""",
2335)
2336
2337add_docstr_all(
2338    "heaviside",
2339    r"""
2340heaviside(values) -> Tensor
2341
2342See :func:`torch.heaviside`
2343""",
2344)
2345
2346add_docstr_all(
2347    "heaviside_",
2348    r"""
2349heaviside_(values) -> Tensor
2350
2351In-place version of :meth:`~Tensor.heaviside`
2352""",
2353)
2354
2355add_docstr_all(
2356    "histc",
2357    r"""
2358histc(bins=100, min=0, max=0) -> Tensor
2359
2360See :func:`torch.histc`
2361""",
2362)
2363
2364add_docstr_all(
2365    "histogram",
2366    r"""
histogram(bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)
2368
2369See :func:`torch.histogram`
2370""",
2371)
2372
2373add_docstr_all(
2374    "index_add_",
2375    r"""
2376index_add_(dim, index, source, *, alpha=1) -> Tensor
2377
2378Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
2379tensor by adding to the indices in the order given in :attr:`index`. For example,
2380if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
2381``source`` is subtracted from the ``j``\ th row of :attr:`self`.
2382
2383The :attr:`dim`\ th dimension of ``source`` must have the same size as the
2384length of :attr:`index` (which must be a vector), and all other dimensions must
2385match :attr:`self`, or an error will be raised.
2386
2387For a 3-D tensor the output is given as::
2388
2389    self[index[i], :, :] += alpha * src[i, :, :]  # if dim == 0
2390    self[:, index[i], :] += alpha * src[:, i, :]  # if dim == 1
2391    self[:, :, index[i]] += alpha * src[:, :, i]  # if dim == 2
2392
2393Note:
2394    {forward_reproducibility_note}
2395
2396Args:
2397    dim (int): dimension along which to index
2398    index (Tensor): indices of ``source`` to select from,
2399            should have dtype either `torch.int64` or `torch.int32`
2400    source (Tensor): the tensor containing values to add
2401
2402Keyword args:
2403    alpha (Number): the scalar multiplier for ``source``
2404
2405Example::
2406
2407    >>> x = torch.ones(5, 3)
2408    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
2409    >>> index = torch.tensor([0, 4, 2])
2410    >>> x.index_add_(0, index, t)
2411    tensor([[  2.,   3.,   4.],
2412            [  1.,   1.,   1.],
2413            [  8.,   9.,  10.],
2414            [  1.,   1.,   1.],
2415            [  5.,   6.,   7.]])
2416    >>> x.index_add_(0, index, t, alpha=-1)
2417    tensor([[  1.,   1.,   1.],
2418            [  1.,   1.,   1.],
2419            [  1.,   1.,   1.],
2420            [  1.,   1.,   1.],
2421            [  1.,   1.,   1.]])
2422""".format(**reproducibility_notes),
2423)
2424
2425add_docstr_all(
2426    "index_copy_",
2427    r"""
2428index_copy_(dim, index, tensor) -> Tensor
2429
2430Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
2431the indices in the order given in :attr:`index`. For example, if ``dim == 0``
2432and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
2433``j``\ th row of :attr:`self`.
2434
2435The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
2436length of :attr:`index` (which must be a vector), and all other dimensions must
2437match :attr:`self`, or an error will be raised.
2438
2439.. note::
2440    If :attr:`index` contains duplicate entries, multiple elements from
2441    :attr:`tensor` will be copied to the same index of :attr:`self`. The result
2442    is nondeterministic since it depends on which copy occurs last.
2443
2444Args:
2445    dim (int): dimension along which to index
2446    index (LongTensor): indices of :attr:`tensor` to select from
2447    tensor (Tensor): the tensor containing values to copy
2448
2449Example::
2450
2451    >>> x = torch.zeros(5, 3)
2452    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
2453    >>> index = torch.tensor([0, 4, 2])
2454    >>> x.index_copy_(0, index, t)
2455    tensor([[ 1.,  2.,  3.],
2456            [ 0.,  0.,  0.],
2457            [ 7.,  8.,  9.],
2458            [ 0.,  0.,  0.],
2459            [ 4.,  5.,  6.]])
2460""",
2461)
2462
2463add_docstr_all(
2464    "index_fill_",
2465    r"""
2466index_fill_(dim, index, value) -> Tensor
2467
2468Fills the elements of the :attr:`self` tensor with value :attr:`value` by
2469selecting the indices in the order given in :attr:`index`.
2470
2471Args:
2472    dim (int): dimension along which to index
2473    index (LongTensor): indices of :attr:`self` tensor to fill in
2474    value (float): the value to fill with
2475
2476Example::
2477    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
2478    >>> index = torch.tensor([0, 2])
2479    >>> x.index_fill_(1, index, -1)
2480    tensor([[-1.,  2., -1.],
2481            [-1.,  5., -1.],
2482            [-1.,  8., -1.]])
2483""",
2484)
2485
2486add_docstr_all(
2487    "index_put_",
2488    r"""
2489index_put_(indices, values, accumulate=False) -> Tensor
2490
2491Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
2492the indices specified in :attr:`indices` (which is a tuple of Tensors). The
2493expression ``tensor.index_put_(indices, values)`` is equivalent to
2494``tensor[indices] = values``. Returns :attr:`self`.
2495
2496If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
2497:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
2498contain duplicate elements.
2499
2500Args:
2501    indices (tuple of LongTensor): tensors used to index into `self`.
2502    values (Tensor): tensor of same dtype as `self`.
2503    accumulate (bool): whether to accumulate into self
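
A small example indexing two positions of a 2-D tensor::

    >>> x = torch.zeros(3, 3)
    >>> rows = torch.tensor([0, 2])
    >>> cols = torch.tensor([1, 1])
    >>> # equivalent to ``x[rows, cols] = torch.tensor([1., 2.])``
    >>> x.index_put_((rows, cols), torch.tensor([1., 2.]))
    tensor([[0., 1., 0.],
            [0., 0., 0.],
            [0., 2., 0.]])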
2504""",
2505)
2506
2507add_docstr_all(
2508    "index_put",
2509    r"""
2510index_put(indices, values, accumulate=False) -> Tensor
2511
Out-of-place version of :meth:`~Tensor.index_put_`.
2513""",
2514)
2515
2516add_docstr_all(
2517    "index_reduce_",
2518    r"""
2519index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor
2520
2521Accumulate the elements of ``source`` into the :attr:`self`
2522tensor by accumulating to the indices in the order given in :attr:`index`
2523using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``,
2524``index[i] == j``, ``reduce == prod`` and ``include_self == True`` then the ``i``\ th
2525row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If
:obj:`include_self=True`, the values in the :attr:`self` tensor are included
in the reduction; otherwise, rows in the :attr:`self` tensor that are accumulated
to are treated as if they were filled with the reduction identities.
2529
2530The :attr:`dim`\ th dimension of ``source`` must have the same size as the
2531length of :attr:`index` (which must be a vector), and all other dimensions must
2532match :attr:`self`, or an error will be raised.
2533
2534For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the
2535output is given as::
2536
2537    self[index[i], :, :] *= src[i, :, :]  # if dim == 0
2538    self[:, index[i], :] *= src[:, i, :]  # if dim == 1
2539    self[:, :, index[i]] *= src[:, :, i]  # if dim == 2
2540
2541Note:
2542    {forward_reproducibility_note}
2543
2544.. note::
2545
2546    This function only supports floating point tensors.
2547
2548.. warning::
2549
2550    This function is in beta and may change in the near future.
2551
2552Args:
2553    dim (int): dimension along which to index
2554    index (Tensor): indices of ``source`` to select from,
2555        should have dtype either `torch.int64` or `torch.int32`
2556    source (FloatTensor): the tensor containing values to accumulate
2557    reduce (str): the reduction operation to apply
2558        (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
2559
2560Keyword args:
2561    include_self (bool): whether the elements from the ``self`` tensor are
2562        included in the reduction
2563
2564Example::
2565
2566    >>> x = torch.empty(5, 3).fill_(2)
2567    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float)
2568    >>> index = torch.tensor([0, 4, 2, 0])
2569    >>> x.index_reduce_(0, index, t, 'prod')
2570    tensor([[20., 44., 72.],
2571            [ 2.,  2.,  2.],
2572            [14., 16., 18.],
2573            [ 2.,  2.,  2.],
2574            [ 8., 10., 12.]])
2575    >>> x = torch.empty(5, 3).fill_(2)
2576    >>> x.index_reduce_(0, index, t, 'prod', include_self=False)
2577    tensor([[10., 22., 36.],
2578            [ 2.,  2.,  2.],
2579            [ 7.,  8.,  9.],
2580            [ 2.,  2.,  2.],
2581            [ 4.,  5.,  6.]])
2582""".format(**reproducibility_notes),
2583)
2584
2585add_docstr_all(
2586    "index_select",
2587    r"""
2588index_select(dim, index) -> Tensor
2589
2590See :func:`torch.index_select`
2591""",
2592)
2593
2594add_docstr_all(
2595    "sparse_mask",
2596    r"""
2597sparse_mask(mask) -> Tensor
2598
2599Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
2600strided tensor :attr:`self` filtered by the indices of the sparse
2601tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
2602ignored. :attr:`self` and :attr:`mask` tensors must have the same
2603shape.
2604
2605.. note::
2606
2607  The returned sparse tensor might contain duplicate values if :attr:`mask`
2608  is not coalesced. It is therefore advisable to pass ``mask.coalesce()``
2609  if such behavior is not desired.
2610
2611.. note::
2612
2613  The returned sparse tensor has the same indices as the sparse tensor
2614  :attr:`mask`, even when the corresponding values in :attr:`self` are
2615  zeros.
2616
2617Args:
2618    mask (Tensor): a sparse tensor whose indices are used as a filter
2619
2620Example::
2621
2622    >>> nse = 5
2623    >>> dims = (5, 5, 2, 2)
2624    >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
2625    ...                torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
2626    >>> V = torch.randn(nse, dims[2], dims[3])
2627    >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
2628    >>> D = torch.randn(dims)
2629    >>> D.sparse_mask(S)
2630    tensor(indices=tensor([[0, 0, 0, 2],
2631                           [0, 1, 4, 3]]),
2632           values=tensor([[[ 1.6550,  0.2397],
2633                           [-0.1611, -0.0779]],
2634
2635                          [[ 0.2326, -1.0558],
2636                           [ 1.4711,  1.9678]],
2637
2638                          [[-0.5138, -0.0411],
2639                           [ 1.9417,  0.5158]],
2640
2641                          [[ 0.0793,  0.0036],
2642                           [-0.2569, -0.1055]]]),
2643           size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
2644""",
2645)
2646
2647add_docstr_all(
2648    "inverse",
2649    r"""
2650inverse() -> Tensor
2651
2652See :func:`torch.inverse`
2653""",
2654)
2655
2656add_docstr_all(
2657    "isnan",
2658    r"""
2659isnan() -> Tensor
2660
2661See :func:`torch.isnan`
2662""",
2663)
2664
2665add_docstr_all(
2666    "isinf",
2667    r"""
2668isinf() -> Tensor
2669
2670See :func:`torch.isinf`
2671""",
2672)
2673
2674add_docstr_all(
2675    "isposinf",
2676    r"""
2677isposinf() -> Tensor
2678
2679See :func:`torch.isposinf`
2680""",
2681)
2682
2683add_docstr_all(
2684    "isneginf",
2685    r"""
2686isneginf() -> Tensor
2687
2688See :func:`torch.isneginf`
2689""",
2690)
2691
2692add_docstr_all(
2693    "isfinite",
2694    r"""
2695isfinite() -> Tensor
2696
2697See :func:`torch.isfinite`
2698""",
2699)
2700
2701add_docstr_all(
2702    "isclose",
2703    r"""
2704isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
2705
2706See :func:`torch.isclose`
2707""",
2708)
2709
2710add_docstr_all(
2711    "isreal",
2712    r"""
2713isreal() -> Tensor
2714
2715See :func:`torch.isreal`
2716""",
2717)
2718
2719add_docstr_all(
2720    "is_coalesced",
2721    r"""
2722is_coalesced() -> bool
2723
2724Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
2725<sparse-coo-docs>` that is coalesced, ``False`` otherwise.
2726
2727.. warning::
2728  Throws an error if :attr:`self` is not a sparse COO tensor.
2729
2730See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.
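
Example (a brief sketch; a COO tensor constructed with duplicate indices is
uncoalesced until :meth:`coalesce` is called)::

    >>> i = torch.tensor([[0, 0]])
    >>> v = torch.tensor([1., 2.])
    >>> s = torch.sparse_coo_tensor(i, v, (3,))
    >>> s.is_coalesced()
    False
    >>> s.coalesce().is_coalesced()
    True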
2731""",
2732)
2733
2734add_docstr_all(
2735    "is_contiguous",
2736    r"""
2737is_contiguous(memory_format=torch.contiguous_format) -> bool
2738
2739Returns True if :attr:`self` tensor is contiguous in memory in the order specified
2740by memory format.
2741
2742Args:
2743    memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
2744        order. Default: ``torch.contiguous_format``.
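
Example (an illustrative sketch; the transpose of a freshly allocated 2-D tensor is
typically not contiguous, while a tensor converted to channels-last is contiguous
in that memory format)::

    >>> x = torch.randn(3, 4)
    >>> x.is_contiguous()
    True
    >>> x.t().is_contiguous()
    False
    >>> y = torch.randn(1, 3, 8, 8).to(memory_format=torch.channels_last)
    >>> y.is_contiguous(memory_format=torch.channels_last)
    True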
2745""",
2746)
2747
2748add_docstr_all(
2749    "is_pinned",
2750    r"""
2751Returns True if this tensor resides in pinned memory.
2752""",
2753)
2754
2755add_docstr_all(
2756    "is_floating_point",
2757    r"""
2758is_floating_point() -> bool
2759
2760Returns True if the data type of :attr:`self` is a floating point data type.
2761""",
2762)
2763
2764add_docstr_all(
2765    "is_complex",
2766    r"""
2767is_complex() -> bool
2768
2769Returns True if the data type of :attr:`self` is a complex data type.
2770""",
2771)
2772
2773add_docstr_all(
2774    "is_inference",
2775    r"""
2776is_inference() -> bool
2777
2778See :func:`torch.is_inference`
2779""",
2780)
2781
2782add_docstr_all(
2783    "is_conj",
2784    r"""
2785is_conj() -> bool
2786
2787Returns True if the conjugate bit of :attr:`self` is set to true.
2788""",
2789)
2790
2791add_docstr_all(
2792    "is_neg",
2793    r"""
2794is_neg() -> bool
2795
2796Returns True if the negative bit of :attr:`self` is set to true.
2797""",
2798)
2799
2800add_docstr_all(
2801    "is_signed",
2802    r"""
2803is_signed() -> bool
2804
2805Returns True if the data type of :attr:`self` is a signed data type.
2806""",
2807)
2808
2809add_docstr_all(
2810    "is_set_to",
2811    r"""
2812is_set_to(tensor) -> bool
2813
2814Returns True if both tensors are pointing to the exact same memory (same
2815storage, offset, size and stride).
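
Example (a small sketch; ``b`` below is set from ``a`` via :meth:`~Tensor.set_`, so
both view exactly the same storage with the same offset, size and stride)::

    >>> a = torch.tensor([1., 2., 3.])
    >>> b = torch.empty(0).set_(a)
    >>> a.is_set_to(b)
    True
    >>> a.is_set_to(a.clone())
    False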
2816""",
2817)
2818
2819add_docstr_all(
2820    "item",
2821    r"""
2822item() -> number
2823
2824Returns the value of this tensor as a standard Python number. This only works
2825for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.
2826
2827This operation is not differentiable.
2828
2829Example::
2830
2831    >>> x = torch.tensor([1.0])
2832    >>> x.item()
2833    1.0
2834
2835""",
2836)
2837
2838add_docstr_all(
2839    "kron",
2840    r"""
2841kron(other) -> Tensor
2842
2843See :func:`torch.kron`
2844""",
2845)
2846
2847add_docstr_all(
2848    "kthvalue",
2849    r"""
2850kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
2851
2852See :func:`torch.kthvalue`
2853""",
2854)
2855
2856add_docstr_all(
2857    "ldexp",
2858    r"""
2859ldexp(other) -> Tensor
2860
2861See :func:`torch.ldexp`
2862""",
2863)
2864
2865add_docstr_all(
2866    "ldexp_",
2867    r"""
2868ldexp_(other) -> Tensor
2869
2870In-place version of :meth:`~Tensor.ldexp`
2871""",
2872)
2873
2874add_docstr_all(
2875    "lcm",
2876    r"""
2877lcm(other) -> Tensor
2878
2879See :func:`torch.lcm`
2880""",
2881)
2882
2883add_docstr_all(
2884    "lcm_",
2885    r"""
2886lcm_(other) -> Tensor
2887
2888In-place version of :meth:`~Tensor.lcm`
2889""",
2890)
2891
2892add_docstr_all(
2893    "le",
2894    r"""
2895le(other) -> Tensor
2896
2897See :func:`torch.le`.
2898""",
2899)
2900
2901add_docstr_all(
2902    "le_",
2903    r"""
2904le_(other) -> Tensor
2905
2906In-place version of :meth:`~Tensor.le`.
2907""",
2908)
2909
2910add_docstr_all(
2911    "less_equal",
2912    r"""
2913less_equal(other) -> Tensor
2914
2915See :func:`torch.less_equal`.
2916""",
2917)
2918
2919add_docstr_all(
2920    "less_equal_",
2921    r"""
2922less_equal_(other) -> Tensor
2923
2924In-place version of :meth:`~Tensor.less_equal`.
2925""",
2926)
2927
2928add_docstr_all(
2929    "lerp",
2930    r"""
2931lerp(end, weight) -> Tensor
2932
2933See :func:`torch.lerp`
2934""",
2935)
2936
2937add_docstr_all(
2938    "lerp_",
2939    r"""
2940lerp_(end, weight) -> Tensor
2941
2942In-place version of :meth:`~Tensor.lerp`
2943""",
2944)
2945
2946add_docstr_all(
2947    "lgamma",
2948    r"""
2949lgamma() -> Tensor
2950
2951See :func:`torch.lgamma`
2952""",
2953)
2954
2955add_docstr_all(
2956    "lgamma_",
2957    r"""
2958lgamma_() -> Tensor
2959
2960In-place version of :meth:`~Tensor.lgamma`
2961""",
2962)
2963
2964add_docstr_all(
2965    "log",
2966    r"""
2967log() -> Tensor
2968
2969See :func:`torch.log`
2970""",
2971)
2972
2973add_docstr_all(
2974    "log_",
2975    r"""
2976log_() -> Tensor
2977
2978In-place version of :meth:`~Tensor.log`
2979""",
2980)
2981
2982add_docstr_all(
2983    "log10",
2984    r"""
2985log10() -> Tensor
2986
2987See :func:`torch.log10`
2988""",
2989)
2990
2991add_docstr_all(
2992    "log10_",
2993    r"""
2994log10_() -> Tensor
2995
2996In-place version of :meth:`~Tensor.log10`
2997""",
2998)
2999
3000add_docstr_all(
3001    "log1p",
3002    r"""
3003log1p() -> Tensor
3004
3005See :func:`torch.log1p`
3006""",
3007)
3008
3009add_docstr_all(
3010    "log1p_",
3011    r"""
3012log1p_() -> Tensor
3013
3014In-place version of :meth:`~Tensor.log1p`
3015""",
3016)
3017
3018add_docstr_all(
3019    "log2",
3020    r"""
3021log2() -> Tensor
3022
3023See :func:`torch.log2`
3024""",
3025)
3026
3027add_docstr_all(
3028    "log2_",
3029    r"""
3030log2_() -> Tensor
3031
3032In-place version of :meth:`~Tensor.log2`
3033""",
3034)
3035
3036add_docstr_all(
3037    "logaddexp",
3038    r"""
3039logaddexp(other) -> Tensor
3040
3041See :func:`torch.logaddexp`
3042""",
3043)
3044
3045add_docstr_all(
3046    "logaddexp2",
3047    r"""
3048logaddexp2(other) -> Tensor
3049
3050See :func:`torch.logaddexp2`
3051""",
3052)
3053
3054add_docstr_all(
3055    "log_normal_",
3056    r"""
3057log_normal_(mean=1, std=2, *, generator=None)
3058
3059Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
3060parameterized by the given mean :math:`\mu` and standard deviation
3061:math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
3062standard deviation of the underlying normal distribution, and not of the
3063returned distribution:
3064
3065.. math::
3066
3067    f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
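
A short usage sketch (sampled values depend on the RNG state, but every sample is
positive because it is the exponential of a normal draw)::

    >>> x = torch.empty(3).log_normal_(mean=0.0, std=0.25)
    >>> bool((x > 0).all())
    True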
3068""",
3069)
3070
3071add_docstr_all(
3072    "logsumexp",
3073    r"""
3074logsumexp(dim, keepdim=False) -> Tensor
3075
3076See :func:`torch.logsumexp`
3077""",
3078)
3079
3080add_docstr_all(
3081    "lt",
3082    r"""
3083lt(other) -> Tensor
3084
3085See :func:`torch.lt`.
3086""",
3087)
3088
3089add_docstr_all(
3090    "lt_",
3091    r"""
3092lt_(other) -> Tensor
3093
3094In-place version of :meth:`~Tensor.lt`.
3095""",
3096)
3097
3098add_docstr_all(
3099    "less",
3100    r"""
3101less(other) -> Tensor
3102
3103See :func:`torch.less`.
3104""",
3105)
3106
3107add_docstr_all(
3108    "less_",
3109    r"""
3110less_(other) -> Tensor
3111
3112In-place version of :meth:`~Tensor.less`.
3113""",
3114)
3115
3116add_docstr_all(
3117    "lu_solve",
3118    r"""
3119lu_solve(LU_data, LU_pivots) -> Tensor
3120
3121See :func:`torch.lu_solve`
3122""",
3123)
3124
3125add_docstr_all(
3126    "map_",
3127    r"""
3128map_(tensor, callable)
3129
3130Applies :attr:`callable` for each element in :attr:`self` tensor and the given
3131:attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
3132the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.
3133
3134The :attr:`callable` should have the signature::
3135
3136    def callable(a, b) -> number
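
Example (a minimal sketch; the callable is applied elementwise in Python, so this
is mainly useful for small CPU tensors)::

    >>> a = torch.tensor([1., 2., 3.])
    >>> b = torch.tensor([10., 20., 30.])
    >>> _ = a.map_(b, lambda x, y: x + y)  # add elementwise, writing into a
    >>> a
    tensor([11., 22., 33.])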
3137""",
3138)
3139
3140add_docstr_all(
3141    "masked_scatter_",
3142    r"""
3143masked_scatter_(mask, source)
3144
3145Copies elements from :attr:`source` into :attr:`self` tensor at positions where
3146the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self`
3147starting at position 0 of :attr:`source` and continuing in order one-by-one for each
3148occurrence of :attr:`mask` being True.
3149The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
3150with the shape of the underlying tensor. The :attr:`source` should have at least
3151as many elements as the number of ones in :attr:`mask`.
3152
3153Args:
3154    mask (BoolTensor): the boolean mask
3155    source (Tensor): the tensor to copy from
3156
3157.. note::
3158
3159    The :attr:`mask` operates on the :attr:`self` tensor, not on the given
3160    :attr:`source` tensor.
3161
3162Example::
3163
3164    >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
3165    >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
3166    >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
3167    >>> self.masked_scatter_(mask, source)
3168    tensor([[0, 0, 0, 0, 1],
3169            [2, 3, 0, 4, 5]])
3170
3171""",
3172)
3173
3174add_docstr_all(
3175    "masked_fill_",
3176    r"""
3177masked_fill_(mask, value)
3178
3179Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
3180True. The shape of :attr:`mask` must be
3181:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
3182tensor.
3183
3184Args:
3185    mask (BoolTensor): the boolean mask
3186    value (float): the value to fill in with
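
Example (a small illustration; the 1-D mask below broadcasts over the rows of ``x``)::

    >>> x = torch.zeros(2, 3)
    >>> mask = torch.tensor([True, False, True])
    >>> x.masked_fill_(mask, 5.0)
    tensor([[5., 0., 5.],
            [5., 0., 5.]])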
3187""",
3188)
3189
3190add_docstr_all(
3191    "masked_select",
3192    r"""
3193masked_select(mask) -> Tensor
3194
3195See :func:`torch.masked_select`
3196""",
3197)
3198
3199add_docstr_all(
3200    "matrix_power",
3201    r"""
3202matrix_power(n) -> Tensor
3203
3204.. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.
3205
3206Alias for :func:`torch.linalg.matrix_power`
3207""",
3208)
3209
3210add_docstr_all(
3211    "matrix_exp",
3212    r"""
3213matrix_exp() -> Tensor
3214
3215See :func:`torch.matrix_exp`
3216""",
3217)
3218
3219add_docstr_all(
3220    "max",
3221    r"""
3222max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
3223
3224See :func:`torch.max`
3225""",
3226)
3227
3228add_docstr_all(
3229    "amax",
3230    r"""
3231amax(dim=None, keepdim=False) -> Tensor
3232
3233See :func:`torch.amax`
3234""",
3235)
3236
3237add_docstr_all(
3238    "maximum",
3239    r"""
3240maximum(other) -> Tensor
3241
3242See :func:`torch.maximum`
3243""",
3244)
3245
3246add_docstr_all(
3247    "fmax",
3248    r"""
3249fmax(other) -> Tensor
3250
3251See :func:`torch.fmax`
3252""",
3253)
3254
3255add_docstr_all(
3256    "argmax",
3257    r"""
3258argmax(dim=None, keepdim=False) -> LongTensor
3259
3260See :func:`torch.argmax`
3261""",
3262)
3263
3264add_docstr_all(
3265    "argwhere",
3266    r"""
3267argwhere() -> Tensor
3268
3269See :func:`torch.argwhere`
3270""",
3271)
3272
3273add_docstr_all(
3274    "mean",
3275    r"""
3276mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
3277
3278See :func:`torch.mean`
3279""",
3280)
3281
3282add_docstr_all(
3283    "nanmean",
3284    r"""
3285nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor
3286
3287See :func:`torch.nanmean`
3288""",
3289)
3290
3291add_docstr_all(
3292    "median",
3293    r"""
3294median(dim=None, keepdim=False) -> (Tensor, LongTensor)
3295
3296See :func:`torch.median`
3297""",
3298)
3299
3300add_docstr_all(
3301    "nanmedian",
3302    r"""
3303nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
3304
3305See :func:`torch.nanmedian`
3306""",
3307)
3308
3309add_docstr_all(
3310    "min",
3311    r"""
3312min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
3313
3314See :func:`torch.min`
3315""",
3316)
3317
3318add_docstr_all(
3319    "amin",
3320    r"""
3321amin(dim=None, keepdim=False) -> Tensor
3322
3323See :func:`torch.amin`
3324""",
3325)
3326
3327add_docstr_all(
3328    "minimum",
3329    r"""
3330minimum(other) -> Tensor
3331
3332See :func:`torch.minimum`
3333""",
3334)
3335
3336add_docstr_all(
3337    "aminmax",
3338    r"""
3339aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)
3340
3341See :func:`torch.aminmax`
3342""",
3343)
3344
3345add_docstr_all(
3346    "fmin",
3347    r"""
3348fmin(other) -> Tensor
3349
3350See :func:`torch.fmin`
3351""",
3352)
3353
3354add_docstr_all(
3355    "argmin",
3356    r"""
3357argmin(dim=None, keepdim=False) -> LongTensor
3358
3359See :func:`torch.argmin`
3360""",
3361)
3362
3363add_docstr_all(
3364    "mm",
3365    r"""
3366mm(mat2) -> Tensor
3367
3368See :func:`torch.mm`
3369""",
3370)
3371
3372add_docstr_all(
3373    "mode",
3374    r"""
3375mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
3376
3377See :func:`torch.mode`
3378""",
3379)
3380
3381add_docstr_all(
3382    "movedim",
3383    r"""
3384movedim(source, destination) -> Tensor
3385
3386See :func:`torch.movedim`
3387""",
3388)
3389
3390add_docstr_all(
3391    "moveaxis",
3392    r"""
3393moveaxis(source, destination) -> Tensor
3394
3395See :func:`torch.moveaxis`
3396""",
3397)
3398
3399add_docstr_all(
3400    "mul",
3401    r"""
3402mul(value) -> Tensor
3403
3404See :func:`torch.mul`.
3405""",
3406)
3407
3408add_docstr_all(
3409    "mul_",
3410    r"""
3411mul_(value) -> Tensor
3412
3413In-place version of :meth:`~Tensor.mul`.
3414""",
3415)
3416
3417add_docstr_all(
3418    "multiply",
3419    r"""
3420multiply(value) -> Tensor
3421
3422See :func:`torch.multiply`.
3423""",
3424)
3425
3426add_docstr_all(
3427    "multiply_",
3428    r"""
3429multiply_(value) -> Tensor
3430
3431In-place version of :meth:`~Tensor.multiply`.
3432""",
3433)
3434
3435add_docstr_all(
3436    "multinomial",
3437    r"""
3438multinomial(num_samples, replacement=False, *, generator=None) -> Tensor
3439
3440See :func:`torch.multinomial`
3441""",
3442)
3443
3444add_docstr_all(
3445    "mv",
3446    r"""
3447mv(vec) -> Tensor
3448
3449See :func:`torch.mv`
3450""",
3451)
3452
3453add_docstr_all(
3454    "mvlgamma",
3455    r"""
3456mvlgamma(p) -> Tensor
3457
3458See :func:`torch.mvlgamma`
3459""",
3460)
3461
3462add_docstr_all(
3463    "mvlgamma_",
3464    r"""
3465mvlgamma_(p) -> Tensor
3466
3467In-place version of :meth:`~Tensor.mvlgamma`
3468""",
3469)
3470
3471add_docstr_all(
3472    "narrow",
3473    r"""
3474narrow(dimension, start, length) -> Tensor
3475
3476See :func:`torch.narrow`.
3477""",
3478)
3479
3480add_docstr_all(
3481    "narrow_copy",
3482    r"""
3483narrow_copy(dimension, start, length) -> Tensor
3484
3485See :func:`torch.narrow_copy`.
3486""",
3487)
3488
3489add_docstr_all(
3490    "ndimension",
3491    r"""
3492ndimension() -> int
3493
3494Alias for :meth:`~Tensor.dim`
3495""",
3496)
3497
3498add_docstr_all(
3499    "nan_to_num",
3500    r"""
3501nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor
3502
3503See :func:`torch.nan_to_num`.
3504""",
3505)
3506
3507add_docstr_all(
3508    "nan_to_num_",
3509    r"""
3510nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor
3511
3512In-place version of :meth:`~Tensor.nan_to_num`.
3513""",
3514)
3515
3516add_docstr_all(
3517    "ne",
3518    r"""
3519ne(other) -> Tensor
3520
3521See :func:`torch.ne`.
3522""",
3523)
3524
3525add_docstr_all(
3526    "ne_",
3527    r"""
3528ne_(other) -> Tensor
3529
3530In-place version of :meth:`~Tensor.ne`.
3531""",
3532)
3533
3534add_docstr_all(
3535    "not_equal",
3536    r"""
3537not_equal(other) -> Tensor
3538
3539See :func:`torch.not_equal`.
3540""",
3541)
3542
3543add_docstr_all(
3544    "not_equal_",
3545    r"""
3546not_equal_(other) -> Tensor
3547
3548In-place version of :meth:`~Tensor.not_equal`.
3549""",
3550)
3551
3552add_docstr_all(
3553    "neg",
3554    r"""
3555neg() -> Tensor
3556
3557See :func:`torch.neg`
3558""",
3559)
3560
3561add_docstr_all(
3562    "negative",
3563    r"""
3564negative() -> Tensor
3565
3566See :func:`torch.negative`
3567""",
3568)
3569
3570add_docstr_all(
3571    "neg_",
3572    r"""
3573neg_() -> Tensor
3574
3575In-place version of :meth:`~Tensor.neg`
3576""",
3577)
3578
3579add_docstr_all(
3580    "negative_",
3581    r"""
3582negative_() -> Tensor
3583
3584In-place version of :meth:`~Tensor.negative`
3585""",
3586)
3587
3588add_docstr_all(
3589    "nelement",
3590    r"""
3591nelement() -> int
3592
3593Alias for :meth:`~Tensor.numel`
3594""",
3595)
3596
3597add_docstr_all(
3598    "nextafter",
3599    r"""
3600nextafter(other) -> Tensor

3601See :func:`torch.nextafter`
3602""",
3603)
3604
3605add_docstr_all(
3606    "nextafter_",
3607    r"""
3608nextafter_(other) -> Tensor

3609In-place version of :meth:`~Tensor.nextafter`
3610""",
3611)
3612
3613add_docstr_all(
3614    "nonzero",
3615    r"""
3616nonzero() -> LongTensor
3617
3618See :func:`torch.nonzero`
3619""",
3620)
3621
3622add_docstr_all(
3623    "nonzero_static",
3624    r"""
3625nonzero_static(input, *, size, fill_value=-1) -> Tensor
3626
3627Returns a 2-D tensor where each row is the index for a non-zero value.
3628The returned Tensor has the same `torch.dtype` as `torch.nonzero()`.
3629
3630Args:
3631    input (Tensor): the input tensor to count non-zero elements.
3632
3633Keyword args:
3634    size (int): the number of non-zero elements expected to be included in the out
3635        tensor. The out tensor is padded with `fill_value` if `size` is larger
3636        than the total number of non-zero elements, and truncated if `size`
3637        is smaller. The size must be a non-negative integer.
3638    fill_value (int): the value to fill the output tensor with when `size` is larger
3639        than the total number of non-zero elements. Default is `-1`, representing an
3640        invalid index.
3641
3642Example::
3643
3644    # Example 1: Padding
3645    >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
3646    >>> static_size = 4
3647    >>> t = torch.nonzero_static(input_tensor, size = static_size)
3648    tensor([[  0,   0],
3649            [  1,   0],
3650            [  1,   1],
3651            [  -1, -1]], dtype=torch.int64)
3652
3653    # Example 2: Truncating
3654    >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
3655    >>> static_size = 2
3656    >>> t = torch.nonzero_static(input_tensor, size = static_size)
3657    tensor([[  0,   0],
3658            [  1,   0]], dtype=torch.int64)
3659
3660    # Example 3: 0 size
3661    >>> input_tensor = torch.tensor([10])
3662    >>> static_size = 0
3663    >>> t = torch.nonzero_static(input_tensor, size = static_size)
3664    tensor([], size=(0, 1), dtype=torch.int64)
3665
3666    # Example 4: 0 rank input
3667    >>> input_tensor = torch.tensor(10)
3668    >>> static_size = 2
3669    >>> t = torch.nonzero_static(input_tensor, size = static_size)
3670    tensor([], size=(2, 0), dtype=torch.int64)
3671""",
3672)
3673
3674add_docstr_all(
3675    "norm",
3676    r"""
3677norm(p=2, dim=None, keepdim=False) -> Tensor
3678
3679See :func:`torch.norm`
3680""",
3681)
3682
3683add_docstr_all(
3684    "normal_",
3685    r"""
3686normal_(mean=0, std=1, *, generator=None) -> Tensor
3687
3688Fills :attr:`self` tensor with elements sampled from the normal distribution
3689parameterized by :attr:`mean` and :attr:`std`.
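
A brief sketch (the drawn values depend on the RNG state, so only the shape and
dtype of the result are shown)::

    >>> x = torch.empty(2, 3).normal_(mean=0.0, std=1.0)
    >>> x.shape, x.dtype
    (torch.Size([2, 3]), torch.float32)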
3690""",
3691)
3692
3693add_docstr_all(
3694    "numel",
3695    r"""
3696numel() -> int
3697
3698See :func:`torch.numel`
3699""",
3700)
3701
3702add_docstr_all(
3703    "numpy",
3704    r"""
3705numpy(*, force=False) -> numpy.ndarray
3706
3707Returns the tensor as a NumPy :class:`ndarray`.
3708
3709If :attr:`force` is ``False`` (the default), the conversion
3710is performed only if the tensor is on the CPU, does not require grad,
3711does not have its conjugate bit set, and is a dtype and layout that
3712NumPy supports. The returned ndarray and the tensor will share their
3713storage, so changes to the tensor will be reflected in the ndarray
3714and vice versa.
3715
3716If :attr:`force` is ``True`` this is equivalent to
3717calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``.
3718If the tensor isn't on the CPU or the conjugate or negative bit is set,
3719the tensor won't share its storage with the returned ndarray.
3720Setting :attr:`force` to ``True`` can be a useful shorthand.
3721
3722Args:
3723    force (bool): if ``True``, the ndarray may be a copy of the tensor
3724               instead of always sharing memory, defaults to ``False``.
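
Example (a short sketch of the default sharing behavior for a CPU tensor; with
``force=False`` the returned ndarray and the tensor view the same memory)::

    >>> t = torch.ones(3)
    >>> a = t.numpy()
    >>> a[0] = 0.0
    >>> t
    tensor([0., 1., 1.])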
3725""",
3726)
3727
3728add_docstr_all(
3729    "orgqr",
3730    r"""
3731orgqr(input2) -> Tensor
3732
3733See :func:`torch.orgqr`
3734""",
3735)
3736
3737add_docstr_all(
3738    "ormqr",
3739    r"""
3740ormqr(input2, input3, left=True, transpose=False) -> Tensor
3741
3742See :func:`torch.ormqr`
3743""",
3744)
3745
3746add_docstr_all(
3747    "permute",
3748    r"""
3749permute(*dims) -> Tensor
3750
3751See :func:`torch.permute`
3752""",
3753)
3754
3755add_docstr_all(
3756    "polygamma",
3757    r"""
3758polygamma(n) -> Tensor
3759
3760See :func:`torch.polygamma`
3761""",
3762)
3763
3764add_docstr_all(
3765    "polygamma_",
3766    r"""
3767polygamma_(n) -> Tensor
3768
3769In-place version of :meth:`~Tensor.polygamma`
3770""",
3771)
3772
3773add_docstr_all(
3774    "positive",
3775    r"""
3776positive() -> Tensor
3777
3778See :func:`torch.positive`
3779""",
3780)
3781
3782add_docstr_all(
3783    "pow",
3784    r"""
3785pow(exponent) -> Tensor
3786
3787See :func:`torch.pow`
3788""",
3789)
3790
3791add_docstr_all(
3792    "pow_",
3793    r"""
3794pow_(exponent) -> Tensor
3795
3796In-place version of :meth:`~Tensor.pow`
3797""",
3798)
3799
3800add_docstr_all(
3801    "float_power",
3802    r"""
3803float_power(exponent) -> Tensor
3804
3805See :func:`torch.float_power`
3806""",
3807)
3808
3809add_docstr_all(
3810    "float_power_",
3811    r"""
3812float_power_(exponent) -> Tensor
3813
3814In-place version of :meth:`~Tensor.float_power`
3815""",
3816)
3817
3818add_docstr_all(
3819    "prod",
3820    r"""
3821prod(dim=None, keepdim=False, dtype=None) -> Tensor
3822
3823See :func:`torch.prod`
3824""",
3825)
3826
3827add_docstr_all(
3828    "put_",
3829    r"""
3830put_(index, source, accumulate=False) -> Tensor
3831
3832Copies the elements from :attr:`source` into the positions specified by
3833:attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
3834it were a 1-D tensor.
3835
3836:attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
3837the same shape.
3838
3839If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
3840:attr:`self`. If accumulate is ``False``, the behavior is undefined if :attr:`index`
3841contain duplicate elements.
3842
3843Args:
3844    index (LongTensor): the indices into self
3845    source (Tensor): the tensor containing values to copy from
3846    accumulate (bool): whether to accumulate into self
3847
3848Example::
3849
3850    >>> src = torch.tensor([[4, 3, 5],
3851    ...                     [6, 7, 8]])
3852    >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
3853    tensor([[  4,   9,   5],
3854            [ 10,   7,   8]])
3855""",
3856)
3857
3858add_docstr_all(
3859    "put",
3860    r"""
3861put(input, index, source, accumulate=False) -> Tensor
3862
3863Out-of-place version of :meth:`torch.Tensor.put_`.
3864`input` corresponds to `self` in :meth:`torch.Tensor.put_`.
3865""",
3866)
3867
3868add_docstr_all(
3869    "qr",
3870    r"""
3871qr(some=True) -> (Tensor, Tensor)
3872
3873See :func:`torch.qr`
3874""",
3875)
3876
3877add_docstr_all(
3878    "qscheme",
3879    r"""
3880qscheme() -> torch.qscheme
3881
3882Returns the quantization scheme of a given QTensor.
3883""",
3884)
3885
3886add_docstr_all(
3887    "quantile",
3888    r"""
3889quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
3890
3891See :func:`torch.quantile`
3892""",
3893)
3894
3895add_docstr_all(
3896    "nanquantile",
3897    r"""
3898nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
3899
3900See :func:`torch.nanquantile`
3901""",
3902)
3903
3904add_docstr_all(
3905    "q_scale",
3906    r"""
3907q_scale() -> float
3908
3909Given a Tensor quantized by linear(affine) quantization,
3910returns the scale of the underlying quantizer().
3911""",
3912)
3913
3914add_docstr_all(
3915    "q_zero_point",
3916    r"""
3917q_zero_point() -> int
3918
3919Given a Tensor quantized by linear(affine) quantization,
3920returns the zero_point of the underlying quantizer().
3921""",
3922)
3923
3924add_docstr_all(
3925    "q_per_channel_scales",
3926    r"""
3927q_per_channel_scales() -> Tensor
3928
3929Given a Tensor quantized by linear (affine) per-channel quantization,
3930returns a Tensor of scales of the underlying quantizer. Its number of elements
3931matches the size of the dimension along which per-channel quantization is applied
3932(see :meth:`~Tensor.q_per_channel_axis`).
3933""",
3934)
3935
3936add_docstr_all(
3937    "q_per_channel_zero_points",
3938    r"""
3939q_per_channel_zero_points() -> Tensor
3940
3941Given a Tensor quantized by linear (affine) per-channel quantization,
3942returns a tensor of zero_points of the underlying quantizer. Its number of elements
3943matches the size of the dimension along which per-channel quantization is applied
3944(see :meth:`~Tensor.q_per_channel_axis`).
3945""",
3946)
3947
3948add_docstr_all(
3949    "q_per_channel_axis",
3950    r"""
3951q_per_channel_axis() -> int
3952
3953Given a Tensor quantized by linear (affine) per-channel quantization,
3954returns the index of dimension on which per-channel quantization is applied.
3955""",
3956)
3957
3958add_docstr_all(
3959    "random_",
3960    r"""
3961random_(from=0, to=None, *, generator=None) -> Tensor
3962
3963Fills :attr:`self` tensor with numbers sampled from the discrete uniform
3964distribution over ``[from, to - 1]``. If not specified, the values are usually
3965only bounded by :attr:`self` tensor's data type. However, for floating point
3966types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
3967value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
3968will be uniform in ``[0, 2^53]``.
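
Example (a minimal sketch; the drawn values vary with the RNG state but always lie
in the requested range)::

    >>> x = torch.empty(4, dtype=torch.int64).random_(0, 10)
    >>> bool(((x >= 0) & (x < 10)).all())
    True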
3969""",
3970)
3971
3972add_docstr_all(
3973    "rad2deg",
3974    r"""
3975rad2deg() -> Tensor
3976
3977See :func:`torch.rad2deg`
3978""",
3979)
3980
3981add_docstr_all(
3982    "rad2deg_",
3983    r"""
3984rad2deg_() -> Tensor
3985
3986In-place version of :meth:`~Tensor.rad2deg`
3987""",
3988)
3989
3990add_docstr_all(
3991    "deg2rad",
3992    r"""
3993deg2rad() -> Tensor
3994
3995See :func:`torch.deg2rad`
3996""",
3997)
3998
3999add_docstr_all(
4000    "deg2rad_",
4001    r"""
4002deg2rad_() -> Tensor
4003
4004In-place version of :meth:`~Tensor.deg2rad`
4005""",
4006)
4007
4008add_docstr_all(
4009    "ravel",
4010    r"""
4011ravel() -> Tensor
4012
4013See :func:`torch.ravel`
4014""",
4015)
4016
4017add_docstr_all(
4018    "reciprocal",
4019    r"""
4020reciprocal() -> Tensor
4021
4022See :func:`torch.reciprocal`
4023""",
4024)
4025
4026add_docstr_all(
4027    "reciprocal_",
4028    r"""
4029reciprocal_() -> Tensor
4030
4031In-place version of :meth:`~Tensor.reciprocal`
4032""",
4033)
4034
4035add_docstr_all(
4036    "record_stream",
4037    r"""
4038record_stream(stream)
4039
4040Marks the tensor as having been used by this stream.  When the tensor
4041is deallocated, this ensures that the tensor memory is not reused for another
4042tensor until all work queued on :attr:`stream` at the time of deallocation is
4043complete.
4044
4045.. note::
4046
4047    The caching allocator is aware of only the stream where a tensor was
4048    allocated. Due to the awareness, it already correctly manages the life
4049    cycle of tensors on only one stream. But if a tensor is used on a stream
4050    different from the stream of origin, the allocator might reuse the memory
4051    unexpectedly. Calling this method lets the allocator know which streams
4052    have used the tensor.
4053
4054.. warning::
4055
4056    This method is most suitable for use cases where you are providing a
4057    function that created a tensor on a side stream, and want users to be able
4058    to make use of the tensor without having to think carefully about stream
4059    safety when making use of it.  These safety guarantees come at some
4060    performance and predictability cost (analogous to the tradeoff between GC
4061    and manual memory management), so if you are in a situation where
4062    you manage the full lifetime of your tensors, you may consider instead
4063    manually managing CUDA events so that calling this method is not necessary.
4064    In particular, when you call this method, on later allocations the
4065    allocator will poll the recorded stream to see if all operations have
4066    completed yet; you can potentially race with side stream computation and
4067    non-deterministically reuse or fail to reuse memory for an allocation.
4068
4069    You can safely use tensors allocated on side streams without
4070    :meth:`~Tensor.record_stream`; you must manually ensure that
4071    any non-creation stream uses of a tensor are synced back to the creation
4072    stream before you deallocate the tensor.  As the CUDA caching allocator
4073    guarantees that the memory will only be reused with the same creation stream,
4074    this is sufficient to ensure that writes to future reallocations of the
4075    memory will be delayed until non-creation stream uses are done.
4076    (Counterintuitively, you may observe that on the CPU side we have already
4077    reallocated the tensor, even though CUDA kernels on the old tensor are
4078    still in progress.  This is fine, because CUDA operations on the new
4079    tensor will appropriately wait for the old operations to complete, as they
4080    are all on the same stream.)
4081
4082    Concretely, this looks like this::
4083
4084        with torch.cuda.stream(s0):
4085            x = torch.zeros(N)
4086
4087        s1.wait_stream(s0)
4088        with torch.cuda.stream(s1):
4089            y = some_comm_op(x)
4090
4091        ... some compute on s0 ...
4092
4093        # synchronize creation stream s0 to side stream s1
4094        # before deallocating x
4095        s0.wait_stream(s1)
4096        del x
4097
4098    Note that some discretion is required when deciding when to perform
4099    ``s0.wait_stream(s1)``.  In particular, if we were to wait immediately
4100    after ``some_comm_op``, there wouldn't be any point in having the side
4101    stream; it would be equivalent to have run ``some_comm_op`` on ``s0``.
4102    Instead, the synchronization must be placed at some appropriate, later
4103    point in time where you expect the side stream ``s1`` to have finished
4104    work.  This location is typically identified via profiling, e.g., using
4105    Chrome traces produced
4106    :meth:`torch.autograd.profiler.profile.export_chrome_trace`.  If you
4107    place the wait too early, work on ``s0`` will block until ``s1`` has finished,
4108    preventing further overlapping of communication and computation.  If you
4109    place the wait too late, you will use more memory than is strictly
4110    necessary (as you are keeping ``x`` live for longer.)  For a concrete
4111    example of how this guidance can be applied in practice, see this post:
4112    `FSDP and CUDACachingAllocator
4113    <https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486>`_.
4114""",
4115)
4116
4117add_docstr_all(
4118    "remainder",
4119    r"""
4120remainder(divisor) -> Tensor
4121
4122See :func:`torch.remainder`
4123""",
4124)
4125
4126add_docstr_all(
4127    "remainder_",
4128    r"""
4129remainder_(divisor) -> Tensor
4130
4131In-place version of :meth:`~Tensor.remainder`
4132""",
4133)
4134
4135add_docstr_all(
4136    "renorm",
4137    r"""
4138renorm(p, dim, maxnorm) -> Tensor
4139
4140See :func:`torch.renorm`
4141""",
4142)
4143
4144add_docstr_all(
4145    "renorm_",
4146    r"""
4147renorm_(p, dim, maxnorm) -> Tensor
4148
4149In-place version of :meth:`~Tensor.renorm`
4150""",
4151)
4152
4153add_docstr_all(
4154    "repeat",
4155    r"""
4156repeat(*repeats) -> Tensor
4157
4158Repeats this tensor along the specified dimensions.
4159
4160Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.
4161
4162.. warning::
4163
4164    :meth:`~Tensor.repeat` behaves differently from
4165    `numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
4166    but is more similar to
4167    `numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
4168    For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.
4169
4170Args:
4171    repeat (torch.Size, int..., tuple of int or list of int): The number of times to repeat this tensor along each dimension
4172
4173Example::
4174
4175    >>> x = torch.tensor([1, 2, 3])
4176    >>> x.repeat(4, 2)
4177    tensor([[ 1,  2,  3,  1,  2,  3],
4178            [ 1,  2,  3,  1,  2,  3],
4179            [ 1,  2,  3,  1,  2,  3],
4180            [ 1,  2,  3,  1,  2,  3]])
4181    >>> x.repeat(4, 2, 1).size()
4182    torch.Size([4, 2, 3])
4183""",
4184)
4185
4186add_docstr_all(
4187    "repeat_interleave",
4188    r"""
4189repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor
4190
4191See :func:`torch.repeat_interleave`.
4192""",
4193)
4194
4195add_docstr_all(
4196    "requires_grad_",
4197    r"""
4198requires_grad_(requires_grad=True) -> Tensor
4199
4200Change if autograd should record operations on this tensor: sets this tensor's
4201:attr:`requires_grad` attribute in-place. Returns this tensor.
4202
4203:func:`requires_grad_`'s main use case is to tell autograd to begin recording
4204operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
4205(because it was obtained through a DataLoader, or required preprocessing or
4206initialization), ``tensor.requires_grad_()`` makes it so that autograd will
4207begin to record operations on ``tensor``.
4208
4209Args:
4210    requires_grad (bool): If autograd should record operations on this tensor.
4211        Default: ``True``.
4212
4213Example::
4214
4215    >>> # Let's say we want to preprocess some saved weights and use
4216    >>> # the result as new weights.
4217    >>> saved_weights = [0.1, 0.2, 0.3, 0.25]
4218    >>> loaded_weights = torch.tensor(saved_weights)
4219    >>> weights = preprocess(loaded_weights)  # some function
4220    >>> weights
4221    tensor([-0.5503,  0.4926, -2.1158, -0.8303])
4222
4223    >>> # Now, start to record operations done to weights
4224    >>> weights.requires_grad_()
4225    >>> out = weights.pow(2).sum()
4226    >>> out.backward()
4227    >>> weights.grad
4228    tensor([-1.1007,  0.9853, -4.2316, -1.6606])
4229
4230""",
4231)
4232
4233add_docstr_all(
4234    "reshape",
4235    r"""
4236reshape(*shape) -> Tensor
4237
4238Returns a tensor with the same data and number of elements as :attr:`self`
4239but with the specified shape. This method returns a view if :attr:`shape` is
4240compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
4241possible to return a view.
4242
4243See :func:`torch.reshape`
4244
4245Args:
4246    shape (tuple of ints or int...): the desired shape
4247
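Example (a brief illustration; reshaping a contiguous tensor such as this one
returns a view of the same data)::

    >>> x = torch.arange(6)
    >>> x.reshape(2, 3)
    tensor([[0, 1, 2],
            [3, 4, 5]])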
4248""",
4249)
4250
4251add_docstr_all(
4252    "reshape_as",
4253    r"""
4254reshape_as(other) -> Tensor
4255
4256Returns this tensor as the same shape as :attr:`other`.
4257``self.reshape_as(other)`` is equivalent to ``self.reshape(other.size())``.
4258This method returns a view if ``other.size()`` is compatible with the current
4259shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.
4260
4261Please see :meth:`reshape` for more information about ``reshape``.
4262
4263Args:
4264    other (:class:`torch.Tensor`): The result tensor has the same shape
4265        as :attr:`other`.
4266""",
4267)
4268
4269add_docstr_all(
4270    "resize_",
4271    r"""
4272resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor
4273
4274Resizes :attr:`self` tensor to the specified size. If the number of elements is
4275larger than the current storage size, then the underlying storage is resized
4276to fit the new number of elements. If the number of elements is smaller, the
4277underlying storage is not changed. Existing elements are preserved but any new
4278memory is uninitialized.
4279
4280.. warning::
4281
4282    This is a low-level method. The storage is reinterpreted as C-contiguous,
4283    ignoring the current strides (unless the target size equals the current
4284    size, in which case the tensor is left unchanged). For most purposes, you
4285    will instead want to use :meth:`~Tensor.view()`, which checks for
4286    contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
4287    change the size in-place with custom strides, see :meth:`~Tensor.set_()`.
4288
4289.. note::
4290
4291    If :func:`torch.use_deterministic_algorithms()` and
4292    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
4293    ``True``, new elements are initialized to prevent nondeterministic behavior
4294    from using the result as an input to an operation. Floating point and
4295    complex values are set to NaN, and integer values are set to the maximum
4296    value.
4297
4298Args:
4299    sizes (torch.Size or int...): the desired size
4300    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
4301        Tensor. Default: ``torch.contiguous_format``. Note that memory format of
4302        :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``.
4303
4304Example::
4305
4306    >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
4307    >>> x.resize_(2, 2)
4308    tensor([[ 1,  2],
4309            [ 3,  4]])
4310""",
4311)
4312
4313add_docstr_all(
4314    "resize_as_",
4315    r"""
4316resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor
4317
4318Resizes the :attr:`self` tensor to be the same size as the specified
4319:attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.
4320
4321Args:
4322    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
4323        Tensor. Default: ``torch.contiguous_format``. Note that memory format of
4324        :attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``.
4325
4326""",
4327)
4328
4329add_docstr_all(
4330    "rot90",
4331    r"""
4332rot90(k, dims) -> Tensor
4333
4334See :func:`torch.rot90`
4335""",
4336)
4337
4338add_docstr_all(
4339    "round",
4340    r"""
4341round(decimals=0) -> Tensor
4342
4343See :func:`torch.round`
4344""",
4345)
4346
4347add_docstr_all(
4348    "round_",
4349    r"""
4350round_(decimals=0) -> Tensor
4351
4352In-place version of :meth:`~Tensor.round`
4353""",
4354)
4355
4356add_docstr_all(
4357    "rsqrt",
4358    r"""
4359rsqrt() -> Tensor
4360
4361See :func:`torch.rsqrt`
4362""",
4363)
4364
4365add_docstr_all(
4366    "rsqrt_",
4367    r"""
4368rsqrt_() -> Tensor
4369
4370In-place version of :meth:`~Tensor.rsqrt`
4371""",
4372)
4373
4374add_docstr_all(
4375    "scatter_",
4376    r"""
4377scatter_(dim, index, src, *, reduce=None) -> Tensor
4378
4379Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
4380specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
4381index is specified by its index in :attr:`src` for ``dimension != dim`` and by
4382the corresponding value in :attr:`index` for ``dimension = dim``.
4383
4384For a 3-D tensor, :attr:`self` is updated as::
4385
4386    self[index[i][j][k]][j][k] = src[i][j][k]  # if dim == 0
4387    self[i][index[i][j][k]][k] = src[i][j][k]  # if dim == 1
4388    self[i][j][index[i][j][k]] = src[i][j][k]  # if dim == 2
4389
4390This is the reverse operation of the manner described in :meth:`~Tensor.gather`.
4391
4392:attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
4393the same number of dimensions. It is also required that
4394``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
4395``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
4396Note that ``index`` and ``src`` do not broadcast.
4397
4398Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
4399between ``0`` and ``self.size(dim) - 1`` inclusive.
4400
4401.. warning::
4402
4403    When indices are not unique, the behavior is non-deterministic (one of the
4404    values from ``src`` will be picked arbitrarily) and the gradient will be
4405    incorrect (it will be propagated to all locations in the source that
4406    correspond to the same index)!
4407
4408.. note::
4409
4410    The backward pass is implemented only for ``src.shape == index.shape``.
4411
4412Additionally accepts an optional :attr:`reduce` argument that allows
4413specification of an optional reduction operation, which is applied to all
4414values in the tensor :attr:`src` into :attr:`self` at the indices
4415specified in the :attr:`index`. For each value in :attr:`src`, the reduction
4416operation is applied to an index in :attr:`self` which is specified by
4417its index in :attr:`src` for ``dimension != dim`` and by the corresponding
4418value in :attr:`index` for ``dimension = dim``.
4419
4420Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
4421is updated as::
4422
4423    self[index[i][j][k]][j][k] *= src[i][j][k]  # if dim == 0
4424    self[i][index[i][j][k]][k] *= src[i][j][k]  # if dim == 1
4425    self[i][j][index[i][j][k]] *= src[i][j][k]  # if dim == 2
4426
4427Reducing with the addition operation is the same as using
4428:meth:`~torch.Tensor.scatter_add_`.
4429
4430.. warning::
4431    The reduce argument with Tensor ``src`` is deprecated and will be removed in
4432    a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
4433    instead for more reduction options.
4434
4435Args:
4436    dim (int): the axis along which to index
4437    index (LongTensor): the indices of elements to scatter, can be either empty
4438        or of the same dimensionality as ``src``. When empty, the operation
4439        returns ``self`` unchanged.
4440    src (Tensor): the source element(s) to scatter.
4441
4442Keyword args:
4443    reduce (str, optional): reduction operation to apply, can be either
4444        ``'add'`` or ``'multiply'``.
4445
4446Example::
4447
4448    >>> src = torch.arange(1, 11).reshape((2, 5))
4449    >>> src
4450    tensor([[ 1,  2,  3,  4,  5],
4451            [ 6,  7,  8,  9, 10]])
4452    >>> index = torch.tensor([[0, 1, 2, 0]])
4453    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
4454    tensor([[1, 0, 0, 4, 0],
4455            [0, 2, 0, 0, 0],
4456            [0, 0, 3, 0, 0]])
4457    >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
4458    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
4459    tensor([[1, 2, 3, 0, 0],
4460            [6, 7, 0, 0, 8],
4461            [0, 0, 0, 0, 0]])
4462
4463    >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
4464    ...            1.23, reduce='multiply')
4465    tensor([[2.0000, 2.0000, 2.4600, 2.0000],
4466            [2.0000, 2.0000, 2.0000, 2.4600]])
4467    >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
4468    ...            1.23, reduce='add')
4469    tensor([[2.0000, 2.0000, 3.2300, 2.0000],
4470            [2.0000, 2.0000, 2.0000, 3.2300]])
4471
4472.. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor:
4473   :noindex:
4474
4475Writes the value from :attr:`value` into :attr:`self` at the indices
4476specified in the :attr:`index` tensor.  This operation is equivalent to the previous version,
4477with the :attr:`src` tensor filled entirely with :attr:`value`.
4478
4479Args:
4480    dim (int): the axis along which to index
4481    index (LongTensor): the indices of elements to scatter, can be either empty
4482        or of the same dimensionality as ``src``. When empty, the operation
4483        returns ``self`` unchanged.
4484    value (Scalar): the value to scatter.
4485
4486Keyword args:
4487    reduce (str, optional): reduction operation to apply, can be either
4488        ``'add'`` or ``'multiply'``.
4489
4490Example::
4491
4492    >>> index = torch.tensor([[0, 1]])
4493    >>> value = 2
4494    >>> torch.zeros(3, 5).scatter_(0, index, value)
4495    tensor([[2., 0., 0., 0., 0.],
4496            [0., 2., 0., 0., 0.],
4497            [0., 0., 0., 0., 0.]])
4498""",
4499)
4500
4501add_docstr_all(
4502    "scatter_add_",
4503    r"""
4504scatter_add_(dim, index, src) -> Tensor
4505
4506Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
4507specified in the :attr:`index` tensor in a similar fashion as
4508:meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
4509an index in :attr:`self` which is specified by its index in :attr:`src`
4510for ``dimension != dim`` and by the corresponding value in :attr:`index` for
4511``dimension = dim``.
4512
4513For a 3-D tensor, :attr:`self` is updated as::
4514
4515    self[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
4516    self[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
4517    self[i][j][index[i][j][k]] += src[i][j][k]  # if dim == 2
4518
4519:attr:`self`, :attr:`index` and :attr:`src` should have the same number of
4520dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
4521dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
4522``d != dim``. Note that ``index`` and ``src`` do not broadcast.
4523
4524Note:
4525    {forward_reproducibility_note}
4526
4527.. note::
4528
4529    The backward pass is implemented only for ``src.shape == index.shape``.
4530
4531Args:
4532    dim (int): the axis along which to index
4533    index (LongTensor): the indices of elements to scatter and add, can be
4534        either empty or of the same dimensionality as ``src``. When empty, the
4535        operation returns ``self`` unchanged.
4536    src (Tensor): the source elements to scatter and add
4537
4538Example::
4539
4540    >>> src = torch.ones((2, 5))
4541    >>> index = torch.tensor([[0, 1, 2, 0, 0]])
4542    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
4543    tensor([[1., 0., 0., 1., 1.],
4544            [0., 1., 0., 0., 0.],
4545            [0., 0., 1., 0., 0.]])
4546    >>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
4547    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
4548    tensor([[2., 0., 0., 1., 1.],
4549            [0., 2., 0., 0., 0.],
4550            [0., 0., 2., 1., 1.]])
4551
4552""".format(**reproducibility_notes),
4553)
4554
4555add_docstr_all(
4556    "scatter_reduce_",
4557    r"""
4558scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor
4559
4560Reduces all values from the :attr:`src` tensor to the indices specified in
4561the :attr:`index` tensor in the :attr:`self` tensor using the applied reduction
4562defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`,
4563:obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an
4564index in :attr:`self` which is specified by its index in :attr:`src` for
4565``dimension != dim`` and by the corresponding value in :attr:`index` for
4566``dimension = dim``. If :obj:`include_self=True`, the values in the :attr:`self`
4567tensor are included in the reduction.
4568
4569:attr:`self`, :attr:`index` and :attr:`src` should all have
4570the same number of dimensions. It is also required that
4571``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
4572``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
4573Note that ``index`` and ``src`` do not broadcast.
4574
4575For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the
4576output is given as::
4577
4578    self[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
4579    self[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
4580    self[i][j][index[i][j][k]] += src[i][j][k]  # if dim == 2
4581
4582Note:
4583    {forward_reproducibility_note}
4584
4585.. note::
4586
4587    The backward pass is implemented only for ``src.shape == index.shape``.
4588
4589.. warning::
4590
4591    This function is in beta and may change in the near future.
4592
4593Args:
4594    dim (int): the axis along which to index
4595    index (LongTensor): the indices of elements to scatter and reduce.
4596    src (Tensor): the source elements to scatter and reduce
4597    reduce (str): the reduction operation to apply for non-unique indices
4598        (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
4599    include_self (bool): whether elements from the :attr:`self` tensor are
4600        included in the reduction
4601
4602Example::
4603
4604    >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
4605    >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
4606    >>> input = torch.tensor([1., 2., 3., 4.])
4607    >>> input.scatter_reduce(0, index, src, reduce="sum")
4608    tensor([5., 14., 8., 4.])
4609    >>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False)
4610    tensor([4., 12., 5., 4.])
4611    >>> input2 = torch.tensor([5., 4., 3., 2.])
4612    >>> input2.scatter_reduce(0, index, src, reduce="amax")
4613    tensor([5., 6., 5., 2.])
4614    >>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False)
4615    tensor([3., 6., 5., 2.])
4616
4617
4618""".format(**reproducibility_notes),
4619)
4620
4621add_docstr_all(
4622    "select",
4623    r"""
4624select(dim, index) -> Tensor
4625
4626See :func:`torch.select`
4627""",
4628)
4629
4630add_docstr_all(
4631    "select_scatter",
4632    r"""
4633select_scatter(src, dim, index) -> Tensor
4634
4635See :func:`torch.select_scatter`
4636""",
4637)
4638
4639add_docstr_all(
4640    "slice_scatter",
4641    r"""
4642slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor
4643
4644See :func:`torch.slice_scatter`
4645""",
4646)
4647
4648add_docstr_all(
4649    "set_",
4650    r"""
4651set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
4652
4653Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
4654:attr:`self` tensor will share the same storage and have the same size and
4655strides as :attr:`source`. Changes to elements in one tensor will be reflected
4656in the other.
4657
4658If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
4659storage, offset, size, and stride.
4660
4661Args:
4662    source (Tensor or Storage): the tensor or storage to use
4663    storage_offset (int, optional): the offset in the storage
4664    size (torch.Size, optional): the desired size. Defaults to the size of the source.
4665    stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
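
Example (a small sketch of storage sharing when :attr:`source` is a tensor)::

    >>> a = torch.zeros(4)
    >>> b = torch.empty(0).set_(a)
    >>> b[0] = 1.
    >>> a
    tensor([1., 0., 0., 0.])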
4666""",
4667)
4668
4669add_docstr_all(
4670    "sigmoid",
4671    r"""
4672sigmoid() -> Tensor
4673
4674See :func:`torch.sigmoid`
4675""",
4676)
4677
4678add_docstr_all(
4679    "sigmoid_",
4680    r"""
4681sigmoid_() -> Tensor
4682
4683In-place version of :meth:`~Tensor.sigmoid`
4684""",
4685)
4686
4687add_docstr_all(
4688    "logit",
4689    r"""
4690logit() -> Tensor
4691
4692See :func:`torch.logit`
4693""",
4694)
4695
4696add_docstr_all(
4697    "logit_",
4698    r"""
4699logit_() -> Tensor
4700
4701In-place version of :meth:`~Tensor.logit`
4702""",
4703)
4704
4705add_docstr_all(
4706    "sign",
4707    r"""
4708sign() -> Tensor
4709
4710See :func:`torch.sign`
4711""",
4712)
4713
4714add_docstr_all(
4715    "sign_",
4716    r"""
4717sign_() -> Tensor
4718
4719In-place version of :meth:`~Tensor.sign`
4720""",
4721)
4722
4723add_docstr_all(
4724    "signbit",
4725    r"""
4726signbit() -> Tensor
4727
4728See :func:`torch.signbit`
4729""",
4730)
4731
4732add_docstr_all(
4733    "sgn",
4734    r"""
4735sgn() -> Tensor
4736
4737See :func:`torch.sgn`
4738""",
4739)
4740
4741add_docstr_all(
4742    "sgn_",
4743    r"""
4744sgn_() -> Tensor
4745
4746In-place version of :meth:`~Tensor.sgn`
4747""",
4748)
4749
4750add_docstr_all(
4751    "sin",
4752    r"""
4753sin() -> Tensor
4754
4755See :func:`torch.sin`
4756""",
4757)
4758
4759add_docstr_all(
4760    "sin_",
4761    r"""
4762sin_() -> Tensor
4763
4764In-place version of :meth:`~Tensor.sin`
4765""",
4766)
4767
4768add_docstr_all(
4769    "sinc",
4770    r"""
4771sinc() -> Tensor
4772
4773See :func:`torch.sinc`
4774""",
4775)
4776
4777add_docstr_all(
4778    "sinc_",
4779    r"""
4780sinc_() -> Tensor
4781
4782In-place version of :meth:`~Tensor.sinc`
4783""",
4784)
4785
4786add_docstr_all(
4787    "sinh",
4788    r"""
4789sinh() -> Tensor
4790
4791See :func:`torch.sinh`
4792""",
4793)
4794
4795add_docstr_all(
4796    "sinh_",
4797    r"""
4798sinh_() -> Tensor
4799
4800In-place version of :meth:`~Tensor.sinh`
4801""",
4802)
4803
4804add_docstr_all(
4805    "size",
4806    r"""
4807size(dim=None) -> torch.Size or int
4808
4809Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
4810the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
4811If ``dim`` is specified, returns an int holding the size of that dimension.
4812
4813Args:
4814  dim (int, optional): The dimension for which to retrieve the size.
4815
4816Example::
4817
4818    >>> t = torch.empty(3, 4, 5)
4819    >>> t.size()
4820    torch.Size([3, 4, 5])
4821    >>> t.size(dim=1)
4822    4
4823
4824""",
4825)
4826
4827add_docstr_all(
4828    "shape",
4829    r"""
4830shape() -> torch.Size
4831
4832Returns the size of the :attr:`self` tensor. Alias for :attr:`size`.
4833
4834See also :meth:`Tensor.size`.
4835
4836Example::
4837
4838    >>> t = torch.empty(3, 4, 5)
4839    >>> t.size()
4840    torch.Size([3, 4, 5])
4841    >>> t.shape
4842    torch.Size([3, 4, 5])
4843
4844""",
4845)
4846
4847add_docstr_all(
4848    "sort",
4849    r"""
4850sort(dim=-1, descending=False) -> (Tensor, LongTensor)
4851
4852See :func:`torch.sort`
4853""",
4854)
4855
4856add_docstr_all(
4857    "msort",
4858    r"""
4859msort() -> Tensor
4860
4861See :func:`torch.msort`
4862""",
4863)
4864
4865add_docstr_all(
4866    "argsort",
4867    r"""
4868argsort(dim=-1, descending=False) -> LongTensor
4869
4870See :func:`torch.argsort`
4871""",
4872)
4873
4874add_docstr_all(
4875    "sparse_dim",
4876    r"""
4877sparse_dim() -> int
4878
4879Return the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
4880
4881.. note::
4882  Returns ``0`` if :attr:`self` is not a sparse tensor.
4883
4884See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
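
Example (a 1-D COO tensor, shown for illustration)::

    >>> s = torch.sparse_coo_tensor(torch.tensor([[0, 2]]), torch.tensor([3.0, 4.0]), (5,))
    >>> s.sparse_dim()
    1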
4885""",
4886)
4887
4888add_docstr_all(
4889    "sparse_resize_",
4890    r"""
4891sparse_resize_(size, sparse_dim, dense_dim) -> Tensor
4892
4893Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
4894size and the number of sparse and dense dimensions.
4895
4896.. note::
4897  If the number of specified elements in :attr:`self` is zero, then
4898  :attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any
4899  size and positive integers such that ``len(size) == sparse_dim +
4900  dense_dim``.
4901
4902  If :attr:`self` specifies one or more elements, however, then each
4903  dimension in :attr:`size` must not be smaller than the corresponding
4904  dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
4905  of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
4906  equal the number of dense dimensions in :attr:`self`.
4907
4908.. warning::
4909  Throws an error if :attr:`self` is not a sparse tensor.
4910
4911Args:
4912    size (torch.Size): the desired size. If :attr:`self` is non-empty
4913      sparse tensor, the desired size cannot be smaller than the
4914      original size.
4915    sparse_dim (int): the number of sparse dimensions
4916    dense_dim (int): the number of dense dimensions
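
Example (a sketch using an empty COO tensor, for which any consistent size is accepted)::

    >>> i = torch.empty((2, 0), dtype=torch.int64)
    >>> v = torch.empty((0,))
    >>> s = torch.sparse_coo_tensor(i, v, (2, 3))
    >>> s.sparse_resize_((4, 5), 2, 0).shape
    torch.Size([4, 5])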
4917""",
4918)
4919
4920add_docstr_all(
4921    "sparse_resize_and_clear_",
4922    r"""
4923sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor
4924
4925Removes all specified elements from a :ref:`sparse tensor
4926<sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
4927size and the number of sparse and dense dimensions.
4928
4929.. warning::
4930  Throws an error if :attr:`self` is not a sparse tensor.
4931
4932Args:
4933    size (torch.Size): the desired size.
4934    sparse_dim (int): the number of sparse dimensions
4935    dense_dim (int): the number of dense dimensions
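
Example (illustrative; all specified elements are dropped before resizing)::

    >>> s = torch.sparse_coo_tensor(torch.tensor([[0], [1]]), torch.tensor([5.0]), (2, 3))
    >>> s.sparse_resize_and_clear_((4, 4), 2, 0)._nnz()
    0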
4936""",
4937)
4938
4939add_docstr_all(
4940    "sqrt",
4941    r"""
4942sqrt() -> Tensor
4943
4944See :func:`torch.sqrt`
4945""",
4946)
4947
4948add_docstr_all(
4949    "sqrt_",
4950    r"""
4951sqrt_() -> Tensor
4952
4953In-place version of :meth:`~Tensor.sqrt`
4954""",
4955)
4956
4957add_docstr_all(
4958    "square",
4959    r"""
4960square() -> Tensor
4961
4962See :func:`torch.square`
4963""",
4964)
4965
4966add_docstr_all(
4967    "square_",
4968    r"""
4969square_() -> Tensor
4970
4971In-place version of :meth:`~Tensor.square`
4972""",
4973)
4974
4975add_docstr_all(
4976    "squeeze",
4977    r"""
4978squeeze(dim=None) -> Tensor
4979
4980See :func:`torch.squeeze`
4981""",
4982)
4983
4984add_docstr_all(
4985    "squeeze_",
4986    r"""
4987squeeze_(dim=None) -> Tensor
4988
4989In-place version of :meth:`~Tensor.squeeze`
4990""",
4991)
4992
4993add_docstr_all(
4994    "std",
4995    r"""
4996std(dim=None, *, correction=1, keepdim=False) -> Tensor
4997
4998See :func:`torch.std`
4999""",
5000)
5001
5002add_docstr_all(
5003    "storage_offset",
5004    r"""
5005storage_offset() -> int
5006
5007Returns :attr:`self` tensor's offset in the underlying storage in terms of
5008number of storage elements (not bytes).
5009
5010Example::
5011
5012    >>> x = torch.tensor([1, 2, 3, 4, 5])
5013    >>> x.storage_offset()
5014    0
5015    >>> x[3:].storage_offset()
5016    3
5017
5018""",
5019)
5020
5021add_docstr_all(
5022    "untyped_storage",
5023    r"""
5024untyped_storage() -> torch.UntypedStorage
5025
5026Returns the underlying :class:`UntypedStorage`.
5027""",
5028)
5029
5030add_docstr_all(
5031    "stride",
5032    r"""
5033stride(dim) -> tuple or int
5034
5035Returns the stride of :attr:`self` tensor.
5036
5037Stride is the jump necessary to go from one element to the next one in the
5038specified dimension :attr:`dim`. A tuple of all strides is returned when no
5039argument is passed in. Otherwise, an integer value is returned as the stride in
5040the particular dimension :attr:`dim`.
5041
5042Args:
5043    dim (int, optional): the desired dimension in which stride is required
5044
5045Example::
5046
5047    >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
5048    >>> x.stride()
5049    (5, 1)
5050    >>> x.stride(0)
5051    5
5052    >>> x.stride(-1)
5053    1
5054
5055""",
5056)
5057
5058add_docstr_all(
5059    "sub",
5060    r"""
5061sub(other, *, alpha=1) -> Tensor
5062
5063See :func:`torch.sub`.
5064""",
5065)
5066
5067add_docstr_all(
5068    "sub_",
5069    r"""
5070sub_(other, *, alpha=1) -> Tensor
5071
5072In-place version of :meth:`~Tensor.sub`
5073""",
5074)
5075
5076add_docstr_all(
5077    "subtract",
5078    r"""
5079subtract(other, *, alpha=1) -> Tensor
5080
5081See :func:`torch.subtract`.
5082""",
5083)
5084
5085add_docstr_all(
5086    "subtract_",
5087    r"""
5088subtract_(other, *, alpha=1) -> Tensor
5089
5090In-place version of :meth:`~Tensor.subtract`.
5091""",
5092)
5093
5094add_docstr_all(
5095    "sum",
5096    r"""
5097sum(dim=None, keepdim=False, dtype=None) -> Tensor
5098
5099See :func:`torch.sum`
5100""",
5101)
5102
5103add_docstr_all(
5104    "nansum",
5105    r"""
5106nansum(dim=None, keepdim=False, dtype=None) -> Tensor
5107
5108See :func:`torch.nansum`
5109""",
5110)
5111
5112add_docstr_all(
5113    "svd",
5114    r"""
5115svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
5116
5117See :func:`torch.svd`
5118""",
5119)
5120
5121add_docstr_all(
5122    "swapdims",
5123    r"""
5124swapdims(dim0, dim1) -> Tensor
5125
5126See :func:`torch.swapdims`
5127""",
5128)
5129
5130add_docstr_all(
5131    "swapdims_",
5132    r"""
5133swapdims_(dim0, dim1) -> Tensor
5134
5135In-place version of :meth:`~Tensor.swapdims`
5136""",
5137)
5138
5139add_docstr_all(
5140    "swapaxes",
5141    r"""
5142swapaxes(axis0, axis1) -> Tensor
5143
5144See :func:`torch.swapaxes`
5145""",
5146)
5147
5148add_docstr_all(
5149    "swapaxes_",
5150    r"""
5151swapaxes_(axis0, axis1) -> Tensor
5152
5153In-place version of :meth:`~Tensor.swapaxes`
5154""",
5155)
5156
5157add_docstr_all(
5158    "t",
5159    r"""
5160t() -> Tensor
5161
5162See :func:`torch.t`
5163""",
5164)
5165
5166add_docstr_all(
5167    "t_",
5168    r"""
5169t_() -> Tensor
5170
5171In-place version of :meth:`~Tensor.t`
5172""",
5173)
5174
5175add_docstr_all(
5176    "tile",
5177    r"""
5178tile(dims) -> Tensor
5179
5180See :func:`torch.tile`
5181""",
5182)
5183
5184add_docstr_all(
5185    "to",
5186    r"""
5187to(*args, **kwargs) -> Tensor
5188
5189Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
5190inferred from the arguments of ``self.to(*args, **kwargs)``.
5191
5192.. note::
5193
5194    If the ``self`` Tensor already
5195    has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
5196    Otherwise, the returned tensor is a copy of ``self`` with the desired
5197    :class:`torch.dtype` and :class:`torch.device`.
5198
5199Here are the ways to call ``to``:
5200
5201.. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
5202   :noindex:
5203
5204    Returns a Tensor with the specified :attr:`dtype`
5205
5206    Args:
5207        {memory_format}
5208
5209.. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
5210   :noindex:
5211
5212    Returns a Tensor with the specified :attr:`device` and (optional)
5213    :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
5214    When :attr:`non_blocking`, tries to convert asynchronously with respect to
5215    the host if possible, e.g., converting a CPU Tensor with pinned memory to a
5216    CUDA Tensor.
5217    When :attr:`copy` is set, a new Tensor is created even when the Tensor
5218    already matches the desired conversion.
5219
5220    Args:
5221        {memory_format}
5222
5223.. method:: to(other, non_blocking=False, copy=False) -> Tensor
5224   :noindex:
5225
5226    Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
5227    the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
5228    asynchronously with respect to the host if possible, e.g., converting a CPU
5229    Tensor with pinned memory to a CUDA Tensor.
5230    When :attr:`copy` is set, a new Tensor is created even when the Tensor
5231    already matches the desired conversion.
5232
5233Example::
5234
5235    >>> tensor = torch.randn(2, 2)  # Initially dtype=float32, device=cpu
5236    >>> tensor.to(torch.float64)
5237    tensor([[-0.5044,  0.0005],
5238            [ 0.3310, -0.0584]], dtype=torch.float64)
5239
5240    >>> cuda0 = torch.device('cuda:0')
5241    >>> tensor.to(cuda0)
5242    tensor([[-0.5044,  0.0005],
5243            [ 0.3310, -0.0584]], device='cuda:0')
5244
5245    >>> tensor.to(cuda0, dtype=torch.float64)
5246    tensor([[-0.5044,  0.0005],
5247            [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
5248
5249    >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
5250    >>> tensor.to(other, non_blocking=True)
5251    tensor([[-0.5044,  0.0005],
5252            [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
5253""".format(**common_args),
5254)
5255
5256add_docstr_all(
5257    "byte",
5258    r"""
5259byte(memory_format=torch.preserve_format) -> Tensor
5260
5261``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.
5262
5263Args:
5264    {memory_format}
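
Example (a small sketch; conversion from a floating point tensor truncates the fractional part)::

    >>> t = torch.tensor([1.7, 2.2, 3.0])
    >>> t.byte()
    tensor([1, 2, 3], dtype=torch.uint8)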
5265""".format(**common_args),
5266)
5267
5268add_docstr_all(
5269    "bool",
5270    r"""
5271bool(memory_format=torch.preserve_format) -> Tensor
5272
5273``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.
5274
5275Args:
5276    {memory_format}
5277""".format(**common_args),
5278)
5279
5280add_docstr_all(
5281    "char",
5282    r"""
5283char(memory_format=torch.preserve_format) -> Tensor
5284
5285``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.
5286
5287Args:
5288    {memory_format}
5289""".format(**common_args),
5290)
5291
5292add_docstr_all(
5293    "bfloat16",
5294    r"""
5295bfloat16(memory_format=torch.preserve_format) -> Tensor

5296``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.
5297
5298Args:
5299    {memory_format}
5300""".format(**common_args),
5301)
5302
5303add_docstr_all(
5304    "double",
5305    r"""
5306double(memory_format=torch.preserve_format) -> Tensor
5307
5308``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.
5309
5310Args:
5311    {memory_format}
5312""".format(**common_args),
5313)
5314
5315add_docstr_all(
5316    "float",
5317    r"""
5318float(memory_format=torch.preserve_format) -> Tensor
5319
5320``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.
5321
5322Args:
5323    {memory_format}
5324""".format(**common_args),
5325)
5326
5327add_docstr_all(
5328    "cdouble",
5329    r"""
5330cdouble(memory_format=torch.preserve_format) -> Tensor
5331
5332``self.cdouble()`` is equivalent to ``self.to(torch.complex128)``. See :func:`to`.
5333
5334Args:
5335    {memory_format}
5336""".format(**common_args),
5337)
5338
5339add_docstr_all(
5340    "cfloat",
5341    r"""
5342cfloat(memory_format=torch.preserve_format) -> Tensor
5343
5344``self.cfloat()`` is equivalent to ``self.to(torch.complex64)``. See :func:`to`.
5345
5346Args:
5347    {memory_format}
5348""".format(**common_args),
5349)
5350
5351add_docstr_all(
5352    "chalf",
5353    r"""
5354chalf(memory_format=torch.preserve_format) -> Tensor
5355
5356``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`.
5357
5358Args:
5359    {memory_format}
5360""".format(**common_args),
5361)
5362
5363add_docstr_all(
5364    "half",
5365    r"""
5366half(memory_format=torch.preserve_format) -> Tensor
5367
5368``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.
5369
5370Args:
5371    {memory_format}
5372""".format(**common_args),
5373)
5374
5375add_docstr_all(
5376    "int",
5377    r"""
5378int(memory_format=torch.preserve_format) -> Tensor
5379
5380``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.
5381
5382Args:
5383    {memory_format}
5384""".format(**common_args),
5385)
5386
5387add_docstr_all(
5388    "int_repr",
5389    r"""
5390int_repr() -> Tensor
5391
5392Given a quantized Tensor,
5393``self.int_repr()`` returns a CPU Tensor with ``uint8_t`` as its data type that stores
5394the underlying integer values of the given Tensor.
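
Example (a per-tensor quantized tensor, shown for illustration)::

    >>> q = torch.quantize_per_tensor(torch.tensor([0.1, 0.2, 0.3]), scale=0.1, zero_point=0, dtype=torch.quint8)
    >>> q.int_repr()
    tensor([1, 2, 3], dtype=torch.uint8)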
5395""",
5396)
5397
5398
5399add_docstr_all(
5400    "long",
5401    r"""
5402long(memory_format=torch.preserve_format) -> Tensor
5403
5404``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.
5405
5406Args:
5407    {memory_format}
5408""".format(**common_args),
5409)
5410
5411add_docstr_all(
5412    "short",
5413    r"""
5414short(memory_format=torch.preserve_format) -> Tensor
5415
5416``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.
5417
5418Args:
5419    {memory_format}
5420""".format(**common_args),
5421)
5422
5423add_docstr_all(
5424    "take",
5425    r"""
5426take(indices) -> Tensor
5427
5428See :func:`torch.take`
5429""",
5430)
5431
5432add_docstr_all(
5433    "take_along_dim",
5434    r"""
5435take_along_dim(indices, dim) -> Tensor
5436
5437See :func:`torch.take_along_dim`
5438""",
5439)
5440
5441add_docstr_all(
5442    "tan",
5443    r"""
5444tan() -> Tensor
5445
5446See :func:`torch.tan`
5447""",
5448)
5449
5450add_docstr_all(
5451    "tan_",
5452    r"""
5453tan_() -> Tensor
5454
5455In-place version of :meth:`~Tensor.tan`
5456""",
5457)
5458
5459add_docstr_all(
5460    "tanh",
5461    r"""
5462tanh() -> Tensor
5463
5464See :func:`torch.tanh`
5465""",
5466)
5467
5468add_docstr_all(
5469    "softmax",
5470    r"""
5471softmax(dim) -> Tensor
5472
5473Alias for :func:`torch.nn.functional.softmax`.
5474""",
5475)
5476
5477add_docstr_all(
5478    "tanh_",
5479    r"""
5480tanh_() -> Tensor
5481
5482In-place version of :meth:`~Tensor.tanh`
5483""",
5484)
5485
5486add_docstr_all(
5487    "tolist",
5488    r"""
5489tolist() -> list or number
5490
5491Returns the tensor as a (nested) list. For scalars, a standard
5492Python number is returned, just like with :meth:`~Tensor.item`.
5493Tensors are automatically moved to the CPU first if necessary.
5494
5495This operation is not differentiable.
5496
5497Examples::
5498
5499    >>> a = torch.randn(2, 2)
5500    >>> a.tolist()
5501    [[0.012766935862600803, 0.5415473580360413],
5502     [-0.08909505605697632, 0.7729271650314331]]
5503    >>> a[0,0].tolist()
5504    0.012766935862600803
5505""",
5506)
5507
5508add_docstr_all(
5509    "topk",
5510    r"""
5511topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)
5512
5513See :func:`torch.topk`
5514""",
5515)
5516
5517add_docstr_all(
5518    "to_dense",
5519    r"""
5520to_dense(dtype=None, *, masked_grad=True) -> Tensor
5521
5522Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`.
5523
5524Keyword args:
5525    {dtype}
5526    masked_grad (bool, optional): If set to ``True`` (default) and
5527      :attr:`self` has a sparse layout then the backward of
5528      :meth:`to_dense` returns ``grad.sparse_mask(self)``.
5529
5530Example::
5531
5532    >>> s = torch.sparse_coo_tensor(
5533    ...        torch.tensor([[1, 1],
5534    ...                      [0, 2]]),
5535    ...        torch.tensor([9, 10]),
5536    ...        size=(3, 3))
5537    >>> s.to_dense()
5538    tensor([[ 0,  0,  0],
5539            [ 9,  0, 10],
5540            [ 0,  0,  0]])
5541""",
5542)
5543
5544add_docstr_all(
5545    "to_sparse",
5546    r"""
5547to_sparse(sparseDims) -> Tensor
5548
5549Returns a sparse copy of the tensor.  PyTorch supports sparse tensors in
5550:ref:`coordinate format <sparse-coo-docs>`.
5551
5552Args:
5553    sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
5554
5555Example::
5556
5557    >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
5558    >>> d
5559    tensor([[ 0,  0,  0],
5560            [ 9,  0, 10],
5561            [ 0,  0,  0]])
5562    >>> d.to_sparse()
5563    tensor(indices=tensor([[1, 1],
5564                           [0, 2]]),
5565           values=tensor([ 9, 10]),
5566           size=(3, 3), nnz=2, layout=torch.sparse_coo)
5567    >>> d.to_sparse(1)
5568    tensor(indices=tensor([[1]]),
5569           values=tensor([[ 9,  0, 10]]),
5570           size=(3, 3), nnz=1, layout=torch.sparse_coo)
5571
5572.. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor
5573   :noindex:
5574
5575Returns a sparse tensor with the specified layout and blocksize.  If
5576:attr:`self` is strided, the number of dense dimensions can be
5577specified, and a hybrid sparse tensor will be created, with
5578`dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
5579dimensions.
5580
5581.. note:: If the :attr:`self` layout and blocksize parameters match
5582          with the specified layout and blocksize, return
5583          :attr:`self`. Otherwise, return a sparse tensor copy of
5584          :attr:`self`.
5585
5586Args:
5587
5588    layout (:class:`torch.layout`, optional): The desired sparse
5589      layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
5590      ``torch.sparse_csc``, ``torch.sparse_bsr``, or
5591      ``torch.sparse_bsc``. Default: if ``None``,
5592      ``torch.sparse_coo``.
5593
5594    blocksize (list, tuple, :class:`torch.Size`, optional): Block size
5595      of the resulting BSR or BSC tensor. For other layouts,
5596      specifying the block size that is not ``None`` will result in a
5597      RuntimeError exception.  A block size must be a tuple of length
5598      two such that its items evenly divide the two sparse dimensions.
5599
5600    dense_dim (int, optional): Number of dense dimensions of the
5601      resulting CSR, CSC, BSR or BSC tensor.  This argument should be
5602      used only if :attr:`self` is a strided tensor, and must be a
5603      value between 0 and dimension of :attr:`self` tensor minus two.
5604
5605Example::
5606
5607    >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
5608    >>> x.to_sparse(layout=torch.sparse_coo)
5609    tensor(indices=tensor([[0, 2, 2],
5610                           [0, 0, 1]]),
5611           values=tensor([1, 2, 3]),
5612           size=(3, 2), nnz=3, layout=torch.sparse_coo)
5613    >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
5614    tensor(crow_indices=tensor([0, 1, 1, 2]),
5615           col_indices=tensor([0, 0]),
5616           values=tensor([[[1, 0]],
5617                          [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
5618    >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
5619    RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
5620    >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
5621    RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize
5622
5623    >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
5624    >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
5625    tensor(crow_indices=tensor([0, 1, 1, 3]),
5626           col_indices=tensor([0, 0, 1]),
5627           values=tensor([[1],
5628                          [2],
5629                          [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)
5630
5631""",
5632)
5633
5634add_docstr_all(
5635    "to_sparse_csr",
5636    r"""
5637to_sparse_csr(dense_dim=None) -> Tensor
5638
5639Convert a tensor to compressed row storage format (CSR).  Except for
5640strided tensors, this only works with 2D tensors.  If :attr:`self` is
5641strided, then the number of dense dimensions can be specified, and a
5642hybrid CSR tensor will be created, with `dense_dim` dense dimensions
5643and `self.dim() - 2 - dense_dim` batch dimensions.
5644
5645Args:
5646
5647    dense_dim (int, optional): Number of dense dimensions of the
5648      resulting CSR tensor.  This argument should be used only if
5649      :attr:`self` is a strided tensor, and must be a value between 0
5650      and dimension of :attr:`self` tensor minus two.
5651
5652Example::
5653
5654    >>> dense = torch.randn(5, 5)
5655    >>> sparse = dense.to_sparse_csr()
5656    >>> sparse._nnz()
5657    25
5658
5659    >>> dense = torch.zeros(3, 3, 1, 1)
5660    >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
5661    >>> dense.to_sparse_csr(dense_dim=2)
5662    tensor(crow_indices=tensor([0, 1, 2, 3]),
5663           col_indices=tensor([0, 2, 1]),
5664           values=tensor([[[1.]],
5665
5666                          [[1.]],
5667
5668                          [[1.]]]), size=(3, 3, 1, 1), nnz=3,
5669           layout=torch.sparse_csr)
5670
5671""",
5672)
5673
5674add_docstr_all(
5675    "to_sparse_csc",
5676    r"""
5677to_sparse_csc(dense_dim=None) -> Tensor
5678
5679Convert a tensor to compressed column storage (CSC) format.  Except
5680for strided tensors, this only works with 2D tensors.  If :attr:`self`
5681is strided, then the number of dense dimensions can be specified,
5682and a hybrid CSC tensor will be created, with `dense_dim` dense
5683dimensions and `self.dim() - 2 - dense_dim` batch dimensions.
5684
5685Args:
5686
5687    dense_dim (int, optional): Number of dense dimensions of the
5688      resulting CSC tensor.  This argument should be used only if
5689      :attr:`self` is a strided tensor, and must be a value between 0
5690      and dimension of :attr:`self` tensor minus two.
5691
5692Example::
5693
5694    >>> dense = torch.randn(5, 5)
5695    >>> sparse = dense.to_sparse_csc()
5696    >>> sparse._nnz()
5697    25
5698
5699    >>> dense = torch.zeros(3, 3, 1, 1)
5700    >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
5701    >>> dense.to_sparse_csc(dense_dim=2)
5702    tensor(ccol_indices=tensor([0, 1, 2, 3]),
5703           row_indices=tensor([0, 2, 1]),
5704           values=tensor([[[1.]],
5705
5706                          [[1.]],
5707
5708                          [[1.]]]), size=(3, 3, 1, 1), nnz=3,
5709           layout=torch.sparse_csc)
5710
5711""",
5712)
5713
5714add_docstr_all(
5715    "to_sparse_bsr",
5716    r"""
5717to_sparse_bsr(blocksize, dense_dim) -> Tensor
5718
5719Convert a tensor to a block sparse row (BSR) storage format of given
5720blocksize.  If :attr:`self` is strided, then the number of dense
5721dimensions can be specified, and a hybrid BSR tensor will be
5722created, with `dense_dim` dense dimensions and `self.dim() - 2 -
5723dense_dim` batch dimensions.
5724
5725Args:
5726
5727    blocksize (list, tuple, :class:`torch.Size`, optional): Block size
5728      of the resulting BSR tensor. A block size must be a tuple of
5729      length two such that its items evenly divide the two sparse
5730      dimensions.
5731
5732    dense_dim (int, optional): Number of dense dimensions of the
5733      resulting BSR tensor.  This argument should be used only if
5734      :attr:`self` is a strided tensor, and must be a value between 0
5735      and dimension of :attr:`self` tensor minus two.
5736
5737Example::
5738
5739    >>> dense = torch.randn(10, 10)
5740    >>> sparse = dense.to_sparse_csr()
5741    >>> sparse_bsr = sparse.to_sparse_bsr((5, 5))
5742    >>> sparse_bsr.col_indices()
5743    tensor([0, 1, 0, 1])
5744
5745    >>> dense = torch.zeros(4, 3, 1)
5746    >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
5747    >>> dense.to_sparse_bsr((2, 1), 1)
5748    tensor(crow_indices=tensor([0, 2, 3]),
5749           col_indices=tensor([0, 2, 1]),
5750           values=tensor([[[[1.]],
5751
5752                           [[1.]]],
5753
5754
5755                          [[[1.]],
5756
5757                           [[1.]]],
5758
5759
5760                          [[[1.]],
5761
5762                           [[1.]]]]), size=(4, 3, 1), nnz=3,
5763           layout=torch.sparse_bsr)
5764
5765""",
5766)
5767
5768add_docstr_all(
5769    "to_sparse_bsc",
5770    r"""
5771to_sparse_bsc(blocksize, dense_dim) -> Tensor
5772
5773Convert a tensor to a block sparse column (BSC) storage format of
5774given blocksize.  If :attr:`self` is strided, then the number of
5775dense dimensions can be specified, and a hybrid BSC tensor will be
5776created, with `dense_dim` dense dimensions and `self.dim() - 2 -
5777dense_dim` batch dimensions.
5778
5779Args:
5780
5781    blocksize (list, tuple, :class:`torch.Size`, optional): Block size
5782      of the resulting BSC tensor. A block size must be a tuple of
5783      length two such that its items evenly divide the two sparse
5784      dimensions.
5785
5786    dense_dim (int, optional): Number of dense dimensions of the
5787      resulting BSC tensor.  This argument should be used only if
5788      :attr:`self` is a strided tensor, and must be a value between 0
5789      and dimension of :attr:`self` tensor minus two.
5790
5791Example::
5792
5793    >>> dense = torch.randn(10, 10)
5794    >>> sparse = dense.to_sparse_csr()
5795    >>> sparse_bsc = sparse.to_sparse_bsc((5, 5))
5796    >>> sparse_bsc.row_indices()
5797    tensor([0, 1, 0, 1])
5798
5799    >>> dense = torch.zeros(4, 3, 1)
5800    >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
5801    >>> dense.to_sparse_bsc((2, 1), 1)
5802    tensor(ccol_indices=tensor([0, 1, 2, 3]),
5803           row_indices=tensor([0, 1, 0]),
5804           values=tensor([[[[1.]],
5805
5806                           [[1.]]],
5807
5808
5809                          [[[1.]],
5810
5811                           [[1.]]],
5812
5813
5814                          [[[1.]],
5815
5816                           [[1.]]]]), size=(4, 3, 1), nnz=3,
5817           layout=torch.sparse_bsc)
5818
5819""",
5820)
5821
5822add_docstr_all(
5823    "to_mkldnn",
5824    r"""
5825to_mkldnn() -> Tensor

5826Returns a copy of the tensor in ``torch.mkldnn`` layout.
5827
5828""",
5829)
5830
5831add_docstr_all(
5832    "trace",
5833    r"""
5834trace() -> Tensor
5835
5836See :func:`torch.trace`
5837""",
5838)
5839
5840add_docstr_all(
5841    "transpose",
5842    r"""
5843transpose(dim0, dim1) -> Tensor
5844
5845See :func:`torch.transpose`
5846""",
5847)
5848
5849add_docstr_all(
5850    "transpose_",
5851    r"""
5852transpose_(dim0, dim1) -> Tensor
5853
5854In-place version of :meth:`~Tensor.transpose`
5855""",
5856)
5857
5858add_docstr_all(
5859    "triangular_solve",
5860    r"""
5861triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
5862
5863See :func:`torch.triangular_solve`
5864""",
5865)
5866
5867add_docstr_all(
5868    "tril",
5869    r"""
5870tril(diagonal=0) -> Tensor
5871
5872See :func:`torch.tril`
5873""",
5874)
5875
5876add_docstr_all(
5877    "tril_",
5878    r"""
5879tril_(diagonal=0) -> Tensor
5880
5881In-place version of :meth:`~Tensor.tril`
5882""",
5883)
5884
5885add_docstr_all(
5886    "triu",
5887    r"""
5888triu(diagonal=0) -> Tensor
5889
5890See :func:`torch.triu`
5891""",
5892)
5893
5894add_docstr_all(
5895    "triu_",
5896    r"""
5897triu_(diagonal=0) -> Tensor
5898
5899In-place version of :meth:`~Tensor.triu`
5900""",
5901)
5902
5903add_docstr_all(
5904    "true_divide",
5905    r"""
5906true_divide(value) -> Tensor
5907
5908See :func:`torch.true_divide`
5909""",
5910)
5911
5912add_docstr_all(
5913    "true_divide_",
5914    r"""
5915true_divide_(value) -> Tensor
5916
5917In-place version of :meth:`~Tensor.true_divide`
5918""",
5919)
5920
5921add_docstr_all(
5922    "trunc",
5923    r"""
5924trunc() -> Tensor
5925
5926See :func:`torch.trunc`
5927""",
5928)
5929
5930add_docstr_all(
5931    "fix",
5932    r"""
5933fix() -> Tensor
5934
5935See :func:`torch.fix`.
5936""",
5937)
5938
5939add_docstr_all(
5940    "trunc_",
5941    r"""
5942trunc_() -> Tensor
5943
5944In-place version of :meth:`~Tensor.trunc`
5945""",
5946)
5947
5948add_docstr_all(
5949    "fix_",
5950    r"""
5951fix_() -> Tensor
5952
5953In-place version of :meth:`~Tensor.fix`
5954""",
5955)
5956
5957add_docstr_all(
5958    "type",
5959    r"""
5960type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor

5961Returns the type if `dtype` is not provided, else casts this object to
5962the specified type.
5963
5964If this is already of the correct type, no copy is performed and the
5965original object is returned.
5966
5967Args:
5968    dtype (dtype or string): The desired type
5969    non_blocking (bool): If ``True``, and the source is in pinned memory
5970        and destination is on the GPU or vice versa, the copy is performed
5971        asynchronously with respect to the host. Otherwise, the argument
5972        has no effect.
5973    **kwargs: For compatibility, may contain the key ``async`` in place of
5974        the ``non_blocking`` argument. The ``async`` arg is deprecated.
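
Example (assuming the default dtype ``torch.float32`` on CPU)::

    >>> x = torch.ones(2)
    >>> x.type()
    'torch.FloatTensor'
    >>> x.type(torch.int32)
    tensor([1, 1], dtype=torch.int32)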
5975""",
5976)
5977
5978add_docstr_all(
5979    "type_as",
5980    r"""
5981type_as(tensor) -> Tensor
5982
5983Returns this tensor cast to the type of the given tensor.
5984
5985This is a no-op if the tensor is already of the correct type. This is
5986equivalent to ``self.type(tensor.type())``
5987
5988Args:
5989    tensor (Tensor): the tensor which has the desired type
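
Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> y = torch.randn(2, 2, dtype=torch.float64)
    >>> x.type_as(y).dtype
    torch.float64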
5990""",
5991)
5992
5993add_docstr_all(
5994    "unfold",
5995    r"""
5996unfold(dimension, size, step) -> Tensor
5997
5998Returns a view of the original tensor which contains all slices of size :attr:`size` from
5999:attr:`self` tensor in the dimension :attr:`dimension`.
6000
6001Step between two slices is given by :attr:`step`.
6002
6003If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
6004dimension :attr:`dimension` in the returned tensor will be
6005`(sizedim - size) / step + 1`.
6006
6007An additional dimension of size :attr:`size` is appended in the returned tensor.
6008
6009Args:
6010    dimension (int): dimension in which unfolding happens
6011    size (int): the size of each slice that is unfolded
6012    step (int): the step between each slice
6013
6014Example::
6015
6016    >>> x = torch.arange(1., 8)
6017    >>> x
6018    tensor([ 1.,  2.,  3.,  4.,  5.,  6.,  7.])
6019    >>> x.unfold(0, 2, 1)
6020    tensor([[ 1.,  2.],
6021            [ 2.,  3.],
6022            [ 3.,  4.],
6023            [ 4.,  5.],
6024            [ 5.,  6.],
6025            [ 6.,  7.]])
6026    >>> x.unfold(0, 2, 2)
6027    tensor([[ 1.,  2.],
6028            [ 3.,  4.],
6029            [ 5.,  6.]])
6030""",
6031)
6032
6033add_docstr_all(
6034    "uniform_",
6035    r"""
6036uniform_(from=0, to=1, *, generator=None) -> Tensor
6037
6038Fills :attr:`self` tensor with numbers sampled from the continuous uniform
6039distribution:
6040
6041.. math::
6042    f(x) = \dfrac{1}{\text{to} - \text{from}}
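
Example (the sampled values are random, so only the range is checked here)::

    >>> t = torch.empty(3).uniform_(2, 4)
    >>> bool(((t >= 2) & (t <= 4)).all())
    True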
6043""",
6044)
6045
6046add_docstr_all(
6047    "unsqueeze",
6048    r"""
6049unsqueeze(dim) -> Tensor
6050
6051See :func:`torch.unsqueeze`
6052""",
6053)
6054
6055add_docstr_all(
6056    "unsqueeze_",
6057    r"""
6058unsqueeze_(dim) -> Tensor
6059
6060In-place version of :meth:`~Tensor.unsqueeze`
6061""",
6062)
6063
6064add_docstr_all(
6065    "var",
6066    r"""
6067var(dim=None, *, correction=1, keepdim=False) -> Tensor
6068
6069See :func:`torch.var`
6070""",
6071)
6072
6073add_docstr_all(
6074    "vdot",
6075    r"""
6076vdot(other) -> Tensor
6077
6078See :func:`torch.vdot`
6079""",
6080)
6081
6082add_docstr_all(
6083    "view",
6084    r"""
6085view(*shape) -> Tensor
6086
6087Returns a new tensor with the same data as the :attr:`self` tensor but of a
6088different :attr:`shape`.
6089
6090The returned tensor shares the same data and must have the same number
6091of elements, but may have a different size. For a tensor to be viewed, the new
6092view size must be compatible with its original size and stride, i.e., each new
6093view dimension must either be a subspace of an original dimension, or only span
6094across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
6095contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
6096
6097.. math::
6098
6099  \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
6100
6101Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
6102without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
6103:meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
6104returns a view if the shapes are compatible, and copies (equivalent to calling
6105:meth:`contiguous`) otherwise.
6106
6107Args:
6108    shape (torch.Size or int...): the desired size
6109
6110Example::
6111
6112    >>> x = torch.randn(4, 4)
6113    >>> x.size()
6114    torch.Size([4, 4])
6115    >>> y = x.view(16)
6116    >>> y.size()
6117    torch.Size([16])
6118    >>> z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
6119    >>> z.size()
6120    torch.Size([2, 8])
6121
6122    >>> a = torch.randn(1, 2, 3, 4)
6123    >>> a.size()
6124    torch.Size([1, 2, 3, 4])
6125    >>> b = a.transpose(1, 2)  # Swaps 2nd and 3rd dimension
6126    >>> b.size()
6127    torch.Size([1, 3, 2, 4])
6128    >>> c = a.view(1, 3, 2, 4)  # Does not change tensor layout in memory
6129    >>> c.size()
6130    torch.Size([1, 3, 2, 4])
6131    >>> torch.equal(b, c)
6132    False
6133
6134
6135.. method:: view(dtype) -> Tensor
6136   :noindex:
6137
6138Returns a new tensor with the same data as the :attr:`self` tensor but of a
6139different :attr:`dtype`.
6140
6141If the element size of :attr:`dtype` is different than that of ``self.dtype``,
6142then the size of the last dimension of the output will be scaled
6143proportionally.  For instance, if :attr:`dtype` element size is twice that of
6144``self.dtype``, then each pair of elements in the last dimension of
6145:attr:`self` will be combined, and the size of the last dimension of the output
6146will be half that of :attr:`self`. If :attr:`dtype` element size is half that
6147of ``self.dtype``, then each element in the last dimension of :attr:`self` will
6148be split in two, and the size of the last dimension of the output will be
6149double that of :attr:`self`. For this to be possible, the following conditions
6150must be true:
6151
6152    * ``self.dim()`` must be greater than 0.
6153    * ``self.stride(-1)`` must be 1.
6154
6155Additionally, if the element size of :attr:`dtype` is greater than that of
6156``self.dtype``, the following conditions must be true as well:
6157
6158    * ``self.size(-1)`` must be divisible by the ratio between the element
6159      sizes of the dtypes.
6160    * ``self.storage_offset()`` must be divisible by the ratio between the
6161      element sizes of the dtypes.
6162    * The strides of all dimensions, except the last dimension, must be
6163      divisible by the ratio between the element sizes of the dtypes.
6164
6165If any of the above conditions are not met, an error is thrown.
6166
6167.. warning::
6168
6169    This overload is not supported by TorchScript, and using it in a TorchScript
6170    program will cause undefined behavior.
6171
6172
6173Args:
6174    dtype (:class:`torch.dtype`): the desired dtype
6175
6176Example::
6177
6178    >>> x = torch.randn(4, 4)
6179    >>> x
6180    tensor([[ 0.9482, -0.0310,  1.4999, -0.5316],
6181            [-0.1520,  0.7472,  0.5617, -0.8649],
6182            [-2.4724, -0.0334, -0.2976, -0.8499],
6183            [-0.2109,  1.9913, -0.9607, -0.6123]])
6184    >>> x.dtype
6185    torch.float32
6186
6187    >>> y = x.view(torch.int32)
6188    >>> y
6189    tensor([[ 1064483442, -1124191867,  1069546515, -1089989247],
6190            [-1105482831,  1061112040,  1057999968, -1084397505],
6191            [-1071760287, -1123489973, -1097310419, -1084649136],
6192            [-1101533110,  1073668768, -1082790149, -1088634448]],
6193        dtype=torch.int32)
6194    >>> y[0, 0] = 1000000000
6195    >>> x
6196    tensor([[ 0.0047, -0.0310,  1.4999, -0.5316],
6197            [-0.1520,  0.7472,  0.5617, -0.8649],
6198            [-2.4724, -0.0334, -0.2976, -0.8499],
6199            [-0.2109,  1.9913, -0.9607, -0.6123]])
6200
6201    >>> x.view(torch.cfloat)
6202    tensor([[ 0.0047-0.0310j,  1.4999-0.5316j],
6203            [-0.1520+0.7472j,  0.5617-0.8649j],
6204            [-2.4724-0.0334j, -0.2976-0.8499j],
6205            [-0.2109+1.9913j, -0.9607-0.6123j]])
6206    >>> x.view(torch.cfloat).size()
6207    torch.Size([4, 2])
6208
6209    >>> x.view(torch.uint8)
6210    tensor([[  0, 202, 154,  59, 182, 243, 253, 188, 185, 252, 191,  63, 240,  22,
6211               8, 191],
6212            [227, 165,  27, 190, 128,  72,  63,  63, 146, 203,  15,  63,  22, 106,
6213              93, 191],
6214            [205,  59,  30, 192, 112, 206,   8, 189,   7,  95, 152, 190,  12, 147,
6215              89, 191],
6216            [ 43, 246,  87, 190, 235, 226, 254,  63, 111, 240, 117, 191, 177, 191,
6217              28, 191]], dtype=torch.uint8)
6218    >>> x.view(torch.uint8).size()
6219    torch.Size([4, 16])
6220""",
6221)
6222
6223add_docstr_all(
6224    "view_as",
6225    r"""
6226view_as(other) -> Tensor
6227
6228View this tensor as the same size as :attr:`other`.
6229``self.view_as(other)`` is equivalent to ``self.view(other.size())``.
6230
6231Please see :meth:`~Tensor.view` for more information about ``view``.
6232
6233Args:
6234    other (:class:`torch.Tensor`): The result tensor has the same size
6235        as :attr:`other`.
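
Example::

    >>> x = torch.arange(6)
    >>> y = torch.empty(2, 3)
    >>> x.view_as(y)
    tensor([[0, 1, 2],
            [3, 4, 5]])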
6236""",
6237)
6238
6239add_docstr_all(
6240    "expand",
6241    r"""
6242expand(*sizes) -> Tensor
6243
6244Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
6245to a larger size.
6246
6247Passing -1 as the size for a dimension means not changing the size of
6248that dimension.
6249
6250Tensor can be also expanded to a larger number of dimensions, and the
6251new ones will be appended at the front. For the new dimensions, the
6252size cannot be set to -1.
6253
6254Expanding a tensor does not allocate new memory, but only creates a
6255new view on the existing tensor where a dimension of size one is
6256expanded to a larger size by setting the ``stride`` to 0. Any dimension
6257of size 1 can be expanded to an arbitrary value without allocating new
6258memory.
6259
6260Args:
6261    *sizes (torch.Size or int...): the desired expanded size
6262
6263.. warning::
6264
6265    More than one element of an expanded tensor may refer to a single
6266    memory location. As a result, in-place operations (especially ones that
6267    are vectorized) may result in incorrect behavior. If you need to write
6268    to the tensors, please clone them first.
6269
6270Example::
6271
6272    >>> x = torch.tensor([[1], [2], [3]])
6273    >>> x.size()
6274    torch.Size([3, 1])
6275    >>> x.expand(3, 4)
6276    tensor([[ 1,  1,  1,  1],
6277            [ 2,  2,  2,  2],
6278            [ 3,  3,  3,  3]])
6279    >>> x.expand(-1, 4)   # -1 means not changing the size of that dimension
6280    tensor([[ 1,  1,  1,  1],
6281            [ 2,  2,  2,  2],
6282            [ 3,  3,  3,  3]])
6283""",
6284)
6285
6286add_docstr_all(
6287    "expand_as",
6288    r"""
6289expand_as(other) -> Tensor
6290
6291Expand this tensor to the same size as :attr:`other`.
6292``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.
6293
6294Please see :meth:`~Tensor.expand` for more information about ``expand``.
6295
6296Args:
6297    other (:class:`torch.Tensor`): The result tensor has the same size
6298        as :attr:`other`.
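
Example::

    >>> x = torch.tensor([[1], [2], [3]])
    >>> y = torch.empty(3, 4)
    >>> x.expand_as(y)
    tensor([[1, 1, 1, 1],
            [2, 2, 2, 2],
            [3, 3, 3, 3]])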
6299""",
6300)
6301
6302add_docstr_all(
6303    "sum_to_size",
6304    r"""
6305sum_to_size(*size) -> Tensor
6306
6307Sum ``this`` tensor to :attr:`size`.
6308:attr:`size` must be broadcastable to ``this`` tensor size.
6309
6310Args:
6311    size (int...): a sequence of integers defining the shape of the output tensor.
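
Example::

    >>> x = torch.ones(3, 4)
    >>> x.sum_to_size(3, 1)
    tensor([[4.],
            [4.],
            [4.]])
    >>> x.sum_to_size(1, 4)
    tensor([[3., 3., 3., 3.]])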
6312""",
6313)
6314
6315
6316add_docstr_all(
6317    "zero_",
6318    r"""
6319zero_() -> Tensor
6320
6321Fills :attr:`self` tensor with zeros.
6322""",
6323)
6324
6325add_docstr_all(
6326    "matmul",
6327    r"""
6328matmul(tensor2) -> Tensor
6329
6330See :func:`torch.matmul`
6331""",
6332)
6333
6334add_docstr_all(
6335    "chunk",
6336    r"""
6337chunk(chunks, dim=0) -> List of Tensors
6338
6339See :func:`torch.chunk`
6340""",
6341)
6342
6343add_docstr_all(
6344    "unsafe_chunk",
6345    r"""
6346unsafe_chunk(chunks, dim=0) -> List of Tensors
6347
6348See :func:`torch.unsafe_chunk`
6349""",
6350)
6351
6352add_docstr_all(
6353    "unsafe_split",
6354    r"""
6355unsafe_split(split_size, dim=0) -> List of Tensors
6356
6357See :func:`torch.unsafe_split`
6358""",
6359)
6360
6361add_docstr_all(
6362    "tensor_split",
6363    r"""
6364tensor_split(indices_or_sections, dim=0) -> List of Tensors
6365
6366See :func:`torch.tensor_split`
6367""",
6368)
6369
6370add_docstr_all(
6371    "hsplit",
6372    r"""
6373hsplit(split_size_or_sections) -> List of Tensors
6374
6375See :func:`torch.hsplit`
6376""",
6377)
6378
6379add_docstr_all(
6380    "vsplit",
6381    r"""
6382vsplit(split_size_or_sections) -> List of Tensors
6383
6384See :func:`torch.vsplit`
6385""",
6386)
6387
6388add_docstr_all(
6389    "dsplit",
6390    r"""
6391dsplit(split_size_or_sections) -> List of Tensors
6392
6393See :func:`torch.dsplit`
6394""",
6395)
6396
6397add_docstr_all(
6398    "stft",
6399    r"""
6400stft(n_fft, hop_length=None, win_length=None, window=None, center=True, pad_mode='reflect', normalized=False, onesided=None, return_complex=None) -> Tensor
6401
6402See :func:`torch.stft`
6403""",
6404)
6405
6406add_docstr_all(
6407    "istft",
6408    r"""
6409istft(n_fft, hop_length=None, win_length=None, window=None,
6410 center=True, normalized=False, onesided=True, length=None) -> Tensor
6411
6412See :func:`torch.istft`
6413""",
6414)
6415
6416add_docstr_all(
6417    "det",
6418    r"""
6419det() -> Tensor
6420
6421See :func:`torch.det`
6422""",
6423)
6424
6425add_docstr_all(
6426    "where",
6427    r"""
6428where(condition, y) -> Tensor
6429
6430``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
6431See :func:`torch.where`
6432""",
6433)
6434
6435add_docstr_all(
6436    "logdet",
6437    r"""
6438logdet() -> Tensor
6439
6440See :func:`torch.logdet`
6441""",
6442)
6443
6444add_docstr_all(
6445    "slogdet",
6446    r"""
6447slogdet() -> (Tensor, Tensor)
6448
6449See :func:`torch.slogdet`
6450""",
6451)
6452
6453add_docstr_all(
6454    "unbind",
6455    r"""
6456unbind(dim=0) -> seq
6457
6458See :func:`torch.unbind`
6459""",
6460)
6461
6462add_docstr_all(
6463    "pin_memory",
6464    r"""
6465pin_memory() -> Tensor
6466
6467Copies the tensor to pinned memory, if it's not already pinned.
6468""",
6469)
6470
6471add_docstr_all(
6472    "pinverse",
6473    r"""
6474pinverse() -> Tensor
6475
6476See :func:`torch.pinverse`
6477""",
6478)
6479
6480add_docstr_all(
6481    "index_add",
6482    r"""
6483index_add(dim, index, source, *, alpha=1) -> Tensor
6484
6485Out-of-place version of :meth:`torch.Tensor.index_add_`.
6486""",
6487)
6488
6489add_docstr_all(
6490    "index_copy",
6491    r"""
6492index_copy(dim, index, tensor2) -> Tensor
6493
6494Out-of-place version of :meth:`torch.Tensor.index_copy_`.
6495""",
6496)
6497
6498add_docstr_all(
6499    "index_fill",
6500    r"""
6501index_fill(dim, index, value) -> Tensor
6502
6503Out-of-place version of :meth:`torch.Tensor.index_fill_`.
6504""",
6505)
6506
6507add_docstr_all(
6508    "scatter",
6509    r"""
6510scatter(dim, index, src) -> Tensor
6511
6512Out-of-place version of :meth:`torch.Tensor.scatter_`
6513""",
6514)
6515
6516add_docstr_all(
6517    "scatter_add",
6518    r"""
6519scatter_add(dim, index, src) -> Tensor
6520
6521Out-of-place version of :meth:`torch.Tensor.scatter_add_`
6522""",
6523)
6524
6525add_docstr_all(
6526    "scatter_reduce",
6527    r"""
6528scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor
6529
6530Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
6531""",
6532)
6533
6534add_docstr_all(
6535    "masked_scatter",
6536    r"""
6537masked_scatter(mask, tensor) -> Tensor
6538
6539Out-of-place version of :meth:`torch.Tensor.masked_scatter_`
6540
6541.. note::
6542
6543    The inputs :attr:`self` and :attr:`mask`
6544    :ref:`broadcast <broadcasting-semantics>`.
6545
6546Example::
6547
6548    >>> self = torch.tensor([0, 0, 0, 0, 0])
6549    >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
6550    >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
6551    >>> self.masked_scatter(mask, source)
6552    tensor([[0, 0, 0, 0, 1],
6553            [2, 3, 0, 4, 5]])
6554
6555""",
6556)
6557
6558add_docstr_all(
6559    "xlogy",
6560    r"""
6561xlogy(other) -> Tensor
6562
6563See :func:`torch.xlogy`
6564""",
6565)
6566
6567add_docstr_all(
6568    "xlogy_",
6569    r"""
6570xlogy_(other) -> Tensor
6571
6572In-place version of :meth:`~Tensor.xlogy`
6573""",
6574)
6575
6576add_docstr_all(
6577    "masked_fill",
6578    r"""
6579masked_fill(mask, value) -> Tensor
6580
6581Out-of-place version of :meth:`torch.Tensor.masked_fill_`
6582""",
6583)
6584
6585add_docstr_all(
6586    "grad",
6587    r"""
6588This attribute is ``None`` by default and becomes a Tensor the first time a call to
6589:func:`backward` computes gradients for ``self``.
6590The attribute will then contain the gradients computed and future calls to
6591:func:`backward` will accumulate (add) gradients into it.
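
Example (gradients accumulate across repeated calls to :func:`backward`)::

    >>> x = torch.tensor([1.0, 2.0], requires_grad=True)
    >>> x.grad is None
    True
    >>> x.sum().backward()
    >>> x.grad
    tensor([1., 1.])
    >>> x.sum().backward()
    >>> x.grad
    tensor([2., 2.])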
6592""",
6593)
6594
6595add_docstr_all(
6596    "retain_grad",
6597    r"""
6598retain_grad() -> None
6599
6600Enables this Tensor to have its :attr:`grad` populated during
6601:func:`backward`. This is a no-op for leaf tensors.
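
Example::

    >>> x = torch.tensor([1.0, 2.0], requires_grad=True)
    >>> y = x * 2
    >>> y.retain_grad()
    >>> y.sum().backward()
    >>> y.grad
    tensor([1., 1.])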
6602""",
6603)
6604
6605add_docstr_all(
6606    "retains_grad",
6607    r"""
6608Is ``True`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
6609populated during :func:`backward`, ``False`` otherwise.
6610""",
6611)
6612
6613add_docstr_all(
6614    "requires_grad",
6615    r"""
6616Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise.
6617
6618.. note::
6619
6620    The fact that gradients need to be computed for a Tensor does not mean that the :attr:`grad`
6621    attribute will be populated, see :attr:`is_leaf` for more details.
6622
6623""",
6624)
6625
6626add_docstr_all(
6627    "is_leaf",
6628    r"""
6629All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.
6630
6631For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
6632created by the user. This means that they are not the result of an operation and so
6633:attr:`grad_fn` is None.
6634
6635Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
6636To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.
6637
6638Example::
6639
6640    >>> a = torch.rand(10, requires_grad=True)
6641    >>> a.is_leaf
6642    True
6643    >>> b = torch.rand(10, requires_grad=True).cuda()
6644    >>> b.is_leaf
6645    False
6646    # b was created by the operation that cast a cpu Tensor into a cuda Tensor
6647    >>> c = torch.rand(10, requires_grad=True) + 2
6648    >>> c.is_leaf
6649    False
6650    # c was created by the addition operation
6651    >>> d = torch.rand(10).cuda()
6652    >>> d.is_leaf
6653    True
6654    # d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
6655    >>> e = torch.rand(10).cuda().requires_grad_()
6656    >>> e.is_leaf
6657    True
6658    # e requires gradients and has no operations creating it
6659    >>> f = torch.rand(10, requires_grad=True, device="cuda")
6660    >>> f.is_leaf
6661    True
6662    # f requires grad, has no operation creating it
6663
6664
6665""",
6666)
6667
6668add_docstr_all(
6669    "names",
6670    r"""
6671Stores names for each of this tensor's dimensions.
6672
6673``names[idx]`` corresponds to the name of tensor dimension ``idx``.
6674Names are either a string if the dimension is named or ``None`` if the
6675dimension is unnamed.
6676
6677Dimension names may contain characters or underscore. Furthermore, a dimension
6678name must be a valid Python variable name (i.e., does not start with underscore).
6679
6680Tensors may not have two named dimensions with the same name.
6681
6682.. warning::
6683    The named tensor API is experimental and subject to change.
6684
6685""",
6686)
6687
6688add_docstr_all(
6689    "is_cuda",
6690    r"""
6691Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.
6692""",
6693)
6694
6695add_docstr_all(
6696    "is_cpu",
6697    r"""
6698Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise.
6699""",
6700)
6701
6702add_docstr_all(
6703    "is_xla",
6704    r"""
6705Is ``True`` if the Tensor is stored on an XLA device, ``False`` otherwise.
6706""",
6707)
6708
6709add_docstr_all(
6710    "is_ipu",
6711    r"""
6712Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise.
6713""",
6714)
6715
6716add_docstr_all(
6717    "is_xpu",
6718    r"""
6719Is ``True`` if the Tensor is stored on the XPU, ``False`` otherwise.
6720""",
6721)
6722
6723add_docstr_all(
6724    "is_quantized",
6725    r"""
6726Is ``True`` if the Tensor is quantized, ``False`` otherwise.
6727""",
6728)
6729
6730add_docstr_all(
6731    "is_meta",
6732    r"""
6733Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise.  Meta tensors
6734are like normal tensors, but they carry no data.
6735""",
6736)
6737
6738add_docstr_all(
6739    "is_mps",
6740    r"""
6741Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise.
6742""",
6743)
6744
6745add_docstr_all(
6746    "is_sparse",
6747    r"""
6748Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise.
6749""",
6750)
6751
6752add_docstr_all(
6753    "is_sparse_csr",
6754    r"""
6755Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise.
6756""",
6757)
6758
6759add_docstr_all(
6760    "device",
6761    r"""
6762Is the :class:`torch.device` where this Tensor is.
6763""",
6764)
6765
6766add_docstr_all(
6767    "ndim",
6768    r"""
6769Alias for :meth:`~Tensor.dim()`
6770""",
6771)
6772
6773add_docstr_all(
6774    "itemsize",
6775    r"""
6776Alias for :meth:`~Tensor.element_size()`
6777""",
6778)
6779
6780add_docstr_all(
6781    "nbytes",
6782    r"""
6783Returns the number of bytes consumed by the "view" of elements of the Tensor
6784if the Tensor does not use sparse storage layout.
6785Defined to be :meth:`~Tensor.numel()` * :meth:`~Tensor.element_size()`
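
Example (assuming the default ``torch.float32`` dtype, i.e. 4 bytes per element)::

    >>> torch.ones(2, 3).nbytes
    24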
6786""",
6787)
6788
6789add_docstr_all(
6790    "T",
6791    r"""
6792Returns a view of this tensor with its dimensions reversed.
6793
6794If ``n`` is the number of dimensions in ``x``,
6795``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.
6796
6797.. warning::
6798    The use of :func:`Tensor.T` on tensors of dimension other than 2 to reverse their shape
6799    is deprecated and it will throw an error in a future release. Consider :attr:`~.Tensor.mT`
6800    to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse
6801    the dimensions of a tensor.
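
Example (a 2-D tensor, for which ``T`` is the ordinary matrix transpose)::

    >>> x = torch.arange(6).reshape(2, 3)
    >>> x.T
    tensor([[0, 3],
            [1, 4],
            [2, 5]])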
6802""",
6803)
6804
6805add_docstr_all(
6806    "H",
6807    r"""
6808Returns a view of a matrix (2-D tensor) conjugated and transposed.
6809
6810``x.H`` is equivalent to ``x.transpose(0, 1).conj()`` for complex matrices and
6811``x.transpose(0, 1)`` for real matrices.
6812
6813.. seealso::
6814
6815        :attr:`~.Tensor.mH`: An attribute that also works on batches of matrices.
6816""",
6817)
6818
6819add_docstr_all(
6820    "mT",
6821    r"""
6822Returns a view of this tensor with the last two dimensions transposed.
6823
6824``x.mT`` is equivalent to ``x.transpose(-2, -1)``.
6825""",
6826)
6827
6828add_docstr_all(
6829    "mH",
6830    r"""
6831Accessing this property is equivalent to calling :func:`adjoint`.
6832""",
6833)
6834
6835add_docstr_all(
6836    "adjoint",
6837    r"""
6838adjoint() -> Tensor
6839
6840Alias for :func:`adjoint`
6841""",
6842)
6843
6844add_docstr_all(
6845    "real",
6846    r"""
6847Returns a new tensor containing real values of the :attr:`self` tensor for a complex-valued input tensor.
6848The returned tensor and :attr:`self` share the same underlying storage.
6849
6850Returns :attr:`self` if :attr:`self` is a real-valued tensor.
6851
6852Example::

6853    >>> x = torch.randn(4, dtype=torch.cfloat)
6854    >>> x
6855    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
6856    >>> x.real
6857    tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
6858
6859""",
6860)
6861
6862add_docstr_all(
6863    "imag",
6864    r"""
6865Returns a new tensor containing imaginary values of the :attr:`self` tensor.
6866The returned tensor and :attr:`self` share the same underlying storage.
6867
6868.. warning::
6869    :func:`imag` is only supported for tensors with complex dtypes.
6870
6871Example::

6872    >>> x = torch.randn(4, dtype=torch.cfloat)
6873    >>> x
6874    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
6875    >>> x.imag
6876    tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
6877
6878""",
6879)
6880
6881add_docstr_all(
6882    "as_subclass",
6883    r"""
6884as_subclass(cls) -> Tensor
6885
6886Makes a ``cls`` instance with the same data pointer as ``self``. Changes
6887in the output mirror changes in ``self``, and the output stays attached
6888to the autograd graph. ``cls`` must be a subclass of ``Tensor``.
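
Example (using a hypothetical subclass, shown only for illustration)::

    >>> class WrappedTensor(torch.Tensor):
    ...     pass
    ...
    >>> t = torch.ones(2)
    >>> type(t.as_subclass(WrappedTensor)).__name__
    'WrappedTensor'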
6889""",
6890)
6891
6892add_docstr_all(
6893    "crow_indices",
6894    r"""
6895crow_indices() -> IntTensor
6896
6897Returns the tensor containing the compressed row indices of the :attr:`self`
6898tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
6899The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
6900and of type ``int32`` or ``int64``. When using MKL routines such as sparse
6901matrix multiplication, it is necessary to use ``int32`` indexing in order
6902to avoid downcasting and potentially losing information.
6903
6904Example::

6905    >>> csr = torch.eye(5, 5).to_sparse_csr()
6906    >>> csr.crow_indices()
6907    tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)
6908
6909""",
6910)
6911
6912add_docstr_all(
6913    "col_indices",
6914    r"""
6915col_indices() -> IntTensor
6916
6917Returns the tensor containing the column indices of the :attr:`self`
6918tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
6919The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
6920and of type ``int32`` or ``int64``.  When using MKL routines such as sparse
6921matrix multiplication, it is necessary to use ``int32`` indexing in order
6922to avoid downcasting and potentially losing information.
6923
6924Example::

6925    >>> csr = torch.eye(5, 5).to_sparse_csr()
6926    >>> csr.col_indices()
6927    tensor([0, 1, 2, 3, 4], dtype=torch.int32)
6928
6929""",
6930)
6931
6932add_docstr_all(
6933    "to_padded_tensor",
6934    r"""
6935to_padded_tensor(padding, output_size=None) -> Tensor

6936See :func:`to_padded_tensor`
6937""",
6938)
6939