import torch
from torch._C import _add_docstr, _special  # type: ignore[attr-defined]
from torch._torch_docs import common_args, multi_dim_common

__all__ = [
    'airy_ai',
    'bessel_j0',
    'bessel_j1',
    'bessel_y0',
    'bessel_y1',
    'chebyshev_polynomial_t',
    'chebyshev_polynomial_u',
    'chebyshev_polynomial_v',
    'chebyshev_polynomial_w',
    'digamma',
    'entr',
    'erf',
    'erfc',
    'erfcx',
    'erfinv',
    'exp2',
    'expit',
    'expm1',
    'gammainc',
    'gammaincc',
    'gammaln',
    'hermite_polynomial_h',
    'hermite_polynomial_he',
    'i0',
    'i0e',
    'i1',
    'i1e',
    'laguerre_polynomial_l',
    'legendre_polynomial_p',
    'log1p',
    'log_ndtr',
    'log_softmax',
    'logit',
    'logsumexp',
    'modified_bessel_i0',
    'modified_bessel_i1',
    'modified_bessel_k0',
    'modified_bessel_k1',
    'multigammaln',
    'ndtr',
    'ndtri',
    'polygamma',
    'psi',
    'round',
    'shifted_chebyshev_polynomial_t',
    'shifted_chebyshev_polynomial_u',
    'shifted_chebyshev_polynomial_v',
    'shifted_chebyshev_polynomial_w',
    'scaled_modified_bessel_k0',
    'scaled_modified_bessel_k1',
    'sinc',
    'softmax',
    'spherical_bessel_j0',
    'xlog1py',
    'xlogy',
    'zeta',
]

Tensor = torch.Tensor

entr = _add_docstr(_special.special_entr,
                   r"""
entr(input, *, out=None) -> Tensor

Computes the entropy on :attr:`input` (as defined below), elementwise.

.. math::
    \begin{align}
    \text{entr(x)} = \begin{cases}
        -x * \ln(x) & x > 0 \\
        0 & x = 0.0 \\
        -\infty & x < 0
    \end{cases}
    \end{align}
""" + """

Args:
    input (Tensor): the input tensor.

Keyword args:
    out (Tensor, optional): the output tensor.

Example::
    >>> a = torch.arange(-0.5, 1, 0.5)
    >>> a
    tensor([-0.5000, 0.0000, 0.5000])
    >>> torch.special.entr(a)
    tensor([  -inf, 0.0000, 0.3466])
""")

psi = _add_docstr(_special.special_psi,
                  r"""
psi(input, *, out=None) -> Tensor

Alias for :func:`torch.special.digamma`.
""")

digamma = _add_docstr(_special.special_digamma,
                      r"""
digamma(input, *, out=None) -> Tensor

Computes the logarithmic derivative of the gamma function on `input`.

.. math::
    \digamma(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)}
""" + r"""
Args:
    input (Tensor): the tensor to compute the digamma function on

Keyword args:
    {out}

.. note:: This function is similar to SciPy's `scipy.special.digamma`.

.. note:: From PyTorch 1.8 onwards, the digamma function returns `-Inf` for `0`.
          Previously it returned `NaN` for `0`.

Example::

    >>> a = torch.tensor([1, 0.5])
    >>> torch.special.digamma(a)
    tensor([-0.5772, -1.9635])

""".format(**common_args))

gammaln = _add_docstr(_special.special_gammaln,
                      r"""
gammaln(input, *, out=None) -> Tensor

Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.

.. math::
    \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|)
""" + """
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.arange(0.5, 2, 0.5)
    >>> torch.special.gammaln(a)
    tensor([ 0.5724, 0.0000, -0.1208])

""".format(**common_args))

polygamma = _add_docstr(_special.special_polygamma,
                        r"""
polygamma(n, input, *, out=None) -> Tensor

Computes the :math:`n^{th}` derivative of the digamma function on :attr:`input`.
:math:`n \geq 0` is called the order of the polygamma function.

.. math::
    \psi^{(n)}(x) = \frac{d^{(n)}}{dx^{(n)}} \psi(x)
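
For example, :math:`\psi^{(0)}(x) = \psi(x)` is the digamma function itself and
:math:`\psi^{(1)}(x)` is known as the trigamma function.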

.. note::
    This function is implemented only for nonnegative integers :math:`n \geq 0`.
""" + """
Args:
    n (int): the order of the polygamma function
    {input}

Keyword args:
    {out}

Example::
    >>> a = torch.tensor([1, 0.5])
    >>> torch.special.polygamma(1, a)
    tensor([1.64493, 4.9348])
    >>> torch.special.polygamma(2, a)
    tensor([ -2.4041, -16.8288])
    >>> torch.special.polygamma(3, a)
    tensor([ 6.4939, 97.4091])
    >>> torch.special.polygamma(4, a)
    tensor([ -24.8863, -771.4742])
""".format(**common_args))

erf = _add_docstr(_special.special_erf,
                  r"""
erf(input, *, out=None) -> Tensor

Computes the error function of :attr:`input`. The error function is defined as follows:

.. math::
    \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.special.erf(torch.tensor([0, -1., 10.]))
    tensor([ 0.0000, -0.8427, 1.0000])
""".format(**common_args))

erfc = _add_docstr(_special.special_erfc,
                   r"""
erfc(input, *, out=None) -> Tensor

Computes the complementary error function of :attr:`input`.
The complementary error function is defined as follows:

.. math::
    \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.special.erfc(torch.tensor([0, -1., 10.]))
    tensor([ 1.0000, 1.8427, 0.0000])
""".format(**common_args))

erfcx = _add_docstr(_special.special_erfcx,
                    r"""
erfcx(input, *, out=None) -> Tensor

Computes the scaled complementary error function for each element of :attr:`input`.
The scaled complementary error function is defined as follows:

.. math::
    \mathrm{erfcx}(x) = e^{x^2} \mathrm{erfc}(x)
""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.special.erfcx(torch.tensor([0, -1., 10.]))
    tensor([ 1.0000, 5.0090, 0.0561])
""".format(**common_args))

erfinv = _add_docstr(_special.special_erfinv,
                     r"""
erfinv(input, *, out=None) -> Tensor

Computes the inverse error function of :attr:`input`.
The inverse error function is defined in the range :math:`(-1, 1)` as:

.. math::
    \mathrm{erfinv}(\mathrm{erf}(x)) = x
""" + r"""

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.special.erfinv(torch.tensor([0, 0.5, -1.]))
    tensor([ 0.0000, 0.4769, -inf])
""".format(**common_args))

logit = _add_docstr(_special.special_logit,
                    r"""
logit(input, eps=None, *, out=None) -> Tensor

Returns a new tensor with the logit of the elements of :attr:`input`.
:attr:`input` is clamped to [eps, 1 - eps] when eps is not None.
When eps is None and :attr:`input` < 0 or :attr:`input` > 1, the function yields NaN.

.. math::
    \begin{align}
    y_{i} &= \ln(\frac{z_{i}}{1 - z_{i}}) \\
    z_{i} &= \begin{cases}
        x_{i} & \text{if eps is None} \\
        \text{eps} & \text{if } x_{i} < \text{eps} \\
        x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\
        1 - \text{eps} & \text{if } x_{i} > 1 - \text{eps}
    \end{cases}
    \end{align}
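
.. note::
    When ``eps`` is None, this function is the inverse of :func:`torch.special.expit`
    (the logistic sigmoid): :math:`\text{expit}(\text{logit}(x)) = x` for :math:`x \in (0, 1)`.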
""" + r"""
Args:
    {input}
    eps (float, optional): the epsilon for input clamp bound. Default: ``None``

Keyword args:
    {out}

Example::

    >>> a = torch.rand(5)
    >>> a
    tensor([0.2796, 0.9331, 0.6486, 0.1523, 0.6516])
    >>> torch.special.logit(a, eps=1e-6)
    tensor([-0.9466, 2.6352, 0.6131, -1.7169, 0.6261])
""".format(**common_args))

logsumexp = _add_docstr(_special.special_logsumexp,
                        r"""
logsumexp(input, dim, keepdim=False, *, out=None)

Alias for :func:`torch.logsumexp`.
""".format(**multi_dim_common))

expit = _add_docstr(_special.special_expit,
                    r"""
expit(input, *, out=None) -> Tensor

Computes the expit (also known as the logistic sigmoid function) of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}}
""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> t = torch.randn(4)
    >>> t
    tensor([ 0.9213, 1.0887, -0.8858, -1.7683])
    >>> torch.special.expit(t)
    tensor([ 0.7153, 0.7481, 0.2920, 0.1458])
""".format(**common_args))

exp2 = _add_docstr(_special.special_exp2,
                   r"""
exp2(input, *, out=None) -> Tensor

Computes the base two exponential function of :attr:`input`.

.. math::
    y_{i} = 2^{x_{i}}

""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.special.exp2(torch.tensor([0, math.log2(2.), 3, 4]))
    tensor([ 1.,  2.,  8., 16.])
""".format(**common_args))

expm1 = _add_docstr(_special.special_expm1,
                    r"""
expm1(input, *, out=None) -> Tensor

Computes the exponential of the elements of :attr:`input` minus 1.

.. math::
    y_{i} = e^{x_{i}} - 1

.. note:: This function provides greater precision than exp(x) - 1 for small values of x.

""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.special.expm1(torch.tensor([0, math.log(2.)]))
    tensor([ 0., 1.])
""".format(**common_args))

xlog1py = _add_docstr(_special.special_xlog1py,
                      r"""
xlog1py(input, other, *, out=None) -> Tensor

Computes ``input * log1p(other)`` with the following cases.

.. math::
    \text{out}_{i} = \begin{cases}
        \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\
        0 & \text{if } \text{input}_{i} = 0.0 \text{ and } \text{other}_{i} \neq \text{NaN} \\
        \text{input}_{i} * \text{log1p}(\text{other}_{i}) & \text{otherwise}
    \end{cases}

Similar to SciPy's `scipy.special.xlog1py`.

""" + r"""

Args:
    input (Number or Tensor) : Multiplier
    other (Number or Tensor) : Argument

.. note:: At least one of :attr:`input` or :attr:`other` must be a tensor.

Keyword args:
    {out}

Example::

    >>> x = torch.zeros(5,)
    >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
    >>> torch.special.xlog1py(x, y)
    tensor([0., 0., 0., 0., nan])
    >>> x = torch.tensor([1, 2, 3])
    >>> y = torch.tensor([3, 2, 1])
    >>> torch.special.xlog1py(x, y)
    tensor([1.3863, 2.1972, 2.0794])
    >>> torch.special.xlog1py(x, 4)
    tensor([1.6094, 3.2189, 4.8283])
    >>> torch.special.xlog1py(2, y)
    tensor([2.7726, 2.1972, 1.3863])
""".format(**common_args))

xlogy = _add_docstr(_special.special_xlogy,
                    r"""
xlogy(input, other, *, out=None) -> Tensor

Computes ``input * log(other)`` with the following cases.

.. math::
    \text{out}_{i} = \begin{cases}
        \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\
        0 & \text{if } \text{input}_{i} = 0.0 \\
        \text{input}_{i} * \log{(\text{other}_{i})} & \text{otherwise}
    \end{cases}

Similar to SciPy's `scipy.special.xlogy`.
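
.. note::
    In particular, :math:`0 \cdot \log(0)` evaluates to :math:`0` rather than NaN,
    matching the limit :math:`\lim_{x \to 0^{+}} x \log(x) = 0`; this is the
    convention commonly used when computing entropies.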
""" + r"""

Args:
    input (Number or Tensor) : Multiplier
    other (Number or Tensor) : Argument

.. note:: At least one of :attr:`input` or :attr:`other` must be a tensor.

Keyword args:
    {out}

Example::

    >>> x = torch.zeros(5,)
    >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
    >>> torch.special.xlogy(x, y)
    tensor([0., 0., 0., 0., nan])
    >>> x = torch.tensor([1, 2, 3])
    >>> y = torch.tensor([3, 2, 1])
    >>> torch.special.xlogy(x, y)
    tensor([1.0986, 1.3863, 0.0000])
    >>> torch.special.xlogy(x, 4)
    tensor([1.3863, 2.7726, 4.1589])
    >>> torch.special.xlogy(2, y)
    tensor([2.1972, 1.3863, 0.0000])
""".format(**common_args))

i0 = _add_docstr(_special.special_i0,
                 r"""
i0(input, *, out=None) -> Tensor

Computes the zeroth order modified Bessel function of the first kind for each element of :attr:`input`.

.. math::
    \text{out}_{i} = I_0(\text{input}_{i}) = \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2}

""" + r"""
Args:
    input (Tensor): the input tensor

Keyword args:
    {out}

Example::

    >>> torch.i0(torch.arange(5, dtype=torch.float32))
    tensor([ 1.0000,  1.2661,  2.2796,  4.8808, 11.3019])

""".format(**common_args))

i0e = _add_docstr(_special.special_i0e,
                  r"""
i0e(input, *, out=None) -> Tensor

Computes the exponentially scaled zeroth order modified Bessel function of the first kind (as defined below)
for each element of :attr:`input`.

.. math::
    \text{out}_{i} = \exp(-|x|) * i0(x) = \exp(-|x|) * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2}

""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::
    >>> torch.special.i0e(torch.arange(5, dtype=torch.float32))
    tensor([1.0000, 0.4658, 0.3085, 0.2430, 0.2070])
""".format(**common_args))

i1 = _add_docstr(_special.special_i1,
                 r"""
i1(input, *, out=None) -> Tensor

Computes the first order modified Bessel function of the first kind (as defined below)
for each element of :attr:`input`.

.. math::
    \text{out}_{i} = \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!}

""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::
    >>> torch.special.i1(torch.arange(5, dtype=torch.float32))
    tensor([0.0000, 0.5652, 1.5906, 3.9534, 9.7595])
""".format(**common_args))

i1e = _add_docstr(_special.special_i1e,
                  r"""
i1e(input, *, out=None) -> Tensor

Computes the exponentially scaled first order modified Bessel function of the first kind (as defined below)
for each element of :attr:`input`.

.. math::
    \text{out}_{i} = \exp(-|x|) * i1(x) =
        \exp(-|x|) * \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!}
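
.. note::
    The exponential scaling keeps the result representable for large positive
    inputs, where :math:`I_1(\text{input})` itself grows like
    :math:`e^{\text{input}} / \sqrt{2 \pi \, \text{input}}` and would eventually overflow.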

""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::
    >>> torch.special.i1e(torch.arange(5, dtype=torch.float32))
    tensor([0.0000, 0.2079, 0.2153, 0.1968, 0.1788])
""".format(**common_args))

ndtr = _add_docstr(_special.special_ndtr,
                   r"""
ndtr(input, *, out=None) -> Tensor

Computes the area under the standard Gaussian probability density function,
integrated from minus infinity to :attr:`input`, elementwise.

.. math::
    \text{ndtr}(x) = \frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt

""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::
    >>> torch.special.ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3]))
    tensor([0.0013, 0.0228, 0.1587, 0.5000, 0.8413, 0.9772, 0.9987])
""".format(**common_args))

ndtri = _add_docstr(_special.special_ndtri,
                    r"""
ndtri(input, *, out=None) -> Tensor

Computes the argument, x, for which the area under the Gaussian probability density function
(integrated from minus infinity to x) is equal to :attr:`input`, elementwise.

.. math::
    \text{ndtri}(p) = \sqrt{2}\text{erf}^{-1}(2p - 1)

.. note::
    Also known as the quantile function of the normal distribution.

""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::
    >>> torch.special.ndtri(torch.tensor([0, 0.25, 0.5, 0.75, 1]))
    tensor([   -inf, -0.6745,  0.0000,  0.6745,     inf])
""".format(**common_args))

log_ndtr = _add_docstr(_special.special_log_ndtr,
                       r"""
log_ndtr(input, *, out=None) -> Tensor

Computes the log of the area under the standard Gaussian probability density function,
integrated from minus infinity to :attr:`input`, elementwise.

.. math::
    \text{log\_ndtr}(x) = \log\left(\frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt \right)

""" + r"""
Args:
    {input}

Keyword args:
    {out}

Example::
    >>> torch.special.log_ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3]))
    tensor([-6.6077, -3.7832, -1.841, -0.6931, -0.1728, -0.023, -0.0014])
""".format(**common_args))

log1p = _add_docstr(_special.special_log1p,
                    r"""
log1p(input, *, out=None) -> Tensor

Alias for :func:`torch.log1p`.
""")

sinc = _add_docstr(_special.special_sinc,
                   r"""
sinc(input, *, out=None) -> Tensor

Computes the normalized sinc of :attr:`input`.

.. math::
    \text{out}_{i} =
    \begin{cases}
      1, & \text{if}\ \text{input}_{i}=0 \\
      \sin(\pi \text{input}_{i}) / (\pi \text{input}_{i}), & \text{otherwise}
    \end{cases}
""" + r"""

Args:
    {input}

Keyword args:
    {out}

Example::
    >>> t = torch.randn(4)
    >>> t
    tensor([ 0.2252, -0.2948, 1.0267, -1.1566])
    >>> torch.special.sinc(t)
    tensor([ 0.9186, 0.8631, -0.0259, -0.1300])
""".format(**common_args))

round = _add_docstr(_special.special_round,
                    r"""
round(input, *, out=None) -> Tensor

Alias for :func:`torch.round`.
""")

softmax = _add_docstr(_special.special_softmax,
                      r"""
softmax(input, dim, *, dtype=None) -> Tensor

Computes the softmax function.

Softmax is defined as:

:math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`

It is applied to all slices along :attr:`dim`, and will re-scale them so that the elements
lie in the range `[0, 1]` and sum to 1.
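
.. note::
    Softmax is invariant to adding the same constant to every element of a slice:
    :math:`\text{Softmax}(x + c) = \text{Softmax}(x)`. Numerically stable
    implementations exploit this by subtracting :math:`\max_{j} x_{j}` before
    exponentiating.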

Args:
    input (Tensor): input
    dim (int): A dimension along which softmax will be computed.
    dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
        If specified, the input tensor is cast to :attr:`dtype` before the operation
        is performed. This is useful for preventing data type overflows. Default: None.

Examples::
    >>> t = torch.ones(2, 2)
    >>> torch.special.softmax(t, 0)
    tensor([[0.5000, 0.5000],
            [0.5000, 0.5000]])

""")

log_softmax = _add_docstr(_special.special_log_softmax,
                          r"""
log_softmax(input, dim, *, dtype=None) -> Tensor

Computes softmax followed by a logarithm.

While mathematically equivalent to log(softmax(x)), doing these two
operations separately is slower and numerically unstable. This function
is computed as:

.. math::
    \text{log\_softmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
""" + r"""

Args:
    input (Tensor): input
    dim (int): A dimension along which log_softmax will be computed.
    dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
        If specified, the input tensor is cast to :attr:`dtype` before the operation
        is performed. This is useful for preventing data type overflows. Default: None.

Example::
    >>> t = torch.ones(2, 2)
    >>> torch.special.log_softmax(t, 0)
    tensor([[-0.6931, -0.6931],
            [-0.6931, -0.6931]])
""")

zeta = _add_docstr(_special.special_zeta,
                   r"""
zeta(input, other, *, out=None) -> Tensor

Computes the Hurwitz zeta function, elementwise.

.. math::
    \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x}

""" + r"""
Args:
    input (Tensor): the input tensor corresponding to `x`.
    other (Tensor): the input tensor corresponding to `q`.

.. note::
    The Riemann zeta function corresponds to the case when `q = 1`.

Keyword args:
    {out}

Example::
    >>> x = torch.tensor([2., 4.])
    >>> torch.special.zeta(x, 1)
    tensor([1.6449, 1.0823])
    >>> torch.special.zeta(x, torch.tensor([1., 2.]))
    tensor([1.6449, 0.0823])
    >>> torch.special.zeta(2, torch.tensor([1., 2.]))
    tensor([1.6449, 0.6449])
""".format(**common_args))

multigammaln = _add_docstr(_special.special_multigammaln,
                           r"""
multigammaln(input, p, *, out=None) -> Tensor

Computes the `multivariate log-gamma function
<https://en.wikipedia.org/wiki/Multivariate_gamma_function>`_ with dimension
:math:`p` element-wise, given by

.. math::
    \log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right)

where :math:`C = \log(\pi) \cdot \frac{p (p - 1)}{4}` and :math:`\Gamma(\cdot)` is the Gamma function.

All elements must be greater than :math:`\frac{p - 1}{2}`, otherwise the behavior is undefined.
""" + """

Args:
    input (Tensor): the tensor to compute the multivariate log-gamma function
    p (int): the number of dimensions

Keyword args:
    {out}

Example::

    >>> a = torch.empty(2, 3).uniform_(1, 2)
    >>> a
    tensor([[1.6835, 1.8474, 1.1929],
            [1.0475, 1.7162, 1.4180]])
    >>> torch.special.multigammaln(a, 2)
    tensor([[0.3928, 0.4007, 0.7586],
            [1.0311, 0.3901, 0.5049]])
""".format(**common_args))

gammainc = _add_docstr(_special.special_gammainc,
                       r"""
gammainc(input, other, *, out=None) -> Tensor

Computes the regularized lower incomplete gamma function:

.. math::
    \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_0^{\text{other}_i} t^{\text{input}_i-1} e^{-t} dt

where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive
and at least one is strictly positive.
If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`.
:math:`\Gamma(\cdot)` in the equation above is the gamma function,

.. math::
    \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt.

See :func:`torch.special.gammaincc` and :func:`torch.special.gammaln` for related functions.

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`
and float inputs.

.. note::
    The backward pass with respect to :attr:`input` is not yet supported.
    Please open an issue on PyTorch's GitHub to request it.
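
.. note::
    The regularized lower and upper incomplete gamma functions are complementary:
    :math:`\text{gammainc}(\text{input}, \text{other}) + \text{gammaincc}(\text{input}, \text{other}) = 1`,
    as the example below verifies.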

""" + r"""
Args:
    input (Tensor): the first non-negative input tensor
    other (Tensor): the second non-negative input tensor

Keyword args:
    {out}

Example::

    >>> a1 = torch.tensor([4.0])
    >>> a2 = torch.tensor([3.0, 4.0, 5.0])
    >>> torch.special.gammainc(a1, a2)
    tensor([0.3528, 0.5665, 0.7350])
    >>> torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2)
    tensor([1., 1., 1.])

""".format(**common_args))

gammaincc = _add_docstr(_special.special_gammaincc,
                        r"""
gammaincc(input, other, *, out=None) -> Tensor

Computes the regularized upper incomplete gamma function:

.. math::
    \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_{\text{other}_i}^{\infty} t^{\text{input}_i-1} e^{-t} dt

where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive
and at least one is strictly positive.
If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`.
:math:`\Gamma(\cdot)` in the equation above is the gamma function,

.. math::
    \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt.

See :func:`torch.special.gammainc` and :func:`torch.special.gammaln` for related functions.

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`
and float inputs.

.. note::
    The backward pass with respect to :attr:`input` is not yet supported.
    Please open an issue on PyTorch's GitHub to request it.

""" + r"""
Args:
    input (Tensor): the first non-negative input tensor
    other (Tensor): the second non-negative input tensor

Keyword args:
    {out}

Example::

    >>> a1 = torch.tensor([4.0])
    >>> a2 = torch.tensor([3.0, 4.0, 5.0])
    >>> torch.special.gammaincc(a1, a2)
    tensor([0.6472, 0.4335, 0.2650])
    >>> torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2)
    tensor([1., 1., 1.])

""".format(**common_args))

airy_ai = _add_docstr(_special.special_airy_ai,
                      r"""
airy_ai(input, *, out=None) -> Tensor

Airy function :math:`\text{Ai}\left(\text{input}\right)`.

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

bessel_j0 = _add_docstr(_special.special_bessel_j0,
                        r"""
bessel_j0(input, *, out=None) -> Tensor

Bessel function of the first kind of order :math:`0`.
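
It is given by the power series

.. math::
    J_{0}(\text{input}) = \sum_{k=0}^{\infty} \frac{(-1)^{k} (\text{input}^{2} / 4)^{k}}{(k!)^{2}}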

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

bessel_j1 = _add_docstr(_special.special_bessel_j1,
                        r"""
bessel_j1(input, *, out=None) -> Tensor

Bessel function of the first kind of order :math:`1`.

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

bessel_y0 = _add_docstr(_special.special_bessel_y0,
                        r"""
bessel_y0(input, *, out=None) -> Tensor

Bessel function of the second kind of order :math:`0`.

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

bessel_y1 = _add_docstr(_special.special_bessel_y1,
                        r"""
bessel_y1(input, *, out=None) -> Tensor

Bessel function of the second kind of order :math:`1`.

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

chebyshev_polynomial_t = _add_docstr(_special.special_chebyshev_polynomial_t,
                                     r"""
chebyshev_polynomial_t(input, n, *, out=None) -> Tensor

Chebyshev polynomial of the first kind :math:`T_{n}(\text{input})`.

If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
is returned. If :math:`n < 6` or :math:`|\text{input}| > 1`, the recursion:

.. math::
    T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input})

is evaluated. Otherwise, the explicit trigonometric formula:

.. math::
    T_{n}(\text{input}) = \text{cos}(n \times \text{arccos}(\text{input}))

is evaluated.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

chebyshev_polynomial_u = _add_docstr(_special.special_chebyshev_polynomial_u,
                                     r"""
chebyshev_polynomial_u(input, n, *, out=None) -> Tensor

Chebyshev polynomial of the second kind :math:`U_{n}(\text{input})`.

If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`,
:math:`2 \times \text{input}` is returned. If :math:`n < 6` or
:math:`|\text{input}| > 1`, the recursion:

.. math::
    U_{n + 1}(\text{input}) = 2 \times \text{input} \times U_{n}(\text{input}) - U_{n - 1}(\text{input})

is evaluated. Otherwise, the explicit trigonometric formula:

.. math::
    U_{n}(\text{input}) = \frac{\text{sin}((n + 1) \times \text{arccos}(\text{input}))}{\text{sin}(\text{arccos}(\text{input}))}

is evaluated.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

chebyshev_polynomial_v = _add_docstr(_special.special_chebyshev_polynomial_v,
                                     r"""
chebyshev_polynomial_v(input, n, *, out=None) -> Tensor

Chebyshev polynomial of the third kind :math:`V_{n}(\text{input})`.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

chebyshev_polynomial_w = _add_docstr(_special.special_chebyshev_polynomial_w,
                                     r"""
chebyshev_polynomial_w(input, n, *, out=None) -> Tensor

Chebyshev polynomial of the fourth kind :math:`W_{n}(\text{input})`.
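
It satisfies the three-term recursion

.. math::
    W_{n + 1}(\text{input}) = 2 \times \text{input} \times W_{n}(\text{input}) - W_{n - 1}(\text{input})

with :math:`W_{0}(\text{input}) = 1` and :math:`W_{1}(\text{input}) = 2 \times \text{input} + 1`.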

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

hermite_polynomial_h = _add_docstr(_special.special_hermite_polynomial_h,
                                   r"""
hermite_polynomial_h(input, n, *, out=None) -> Tensor

Physicist's Hermite polynomial :math:`H_{n}(\text{input})`.

If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`2 \times \text{input}`
is returned. Otherwise, the recursion:

.. math::
    H_{n + 1}(\text{input}) = 2 \times \text{input} \times H_{n}(\text{input}) - 2 \times n \times H_{n - 1}(\text{input})

is evaluated.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

hermite_polynomial_he = _add_docstr(_special.special_hermite_polynomial_he,
                                    r"""
hermite_polynomial_he(input, n, *, out=None) -> Tensor

Probabilist's Hermite polynomial :math:`He_{n}(\text{input})`.

If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
is returned. Otherwise, the recursion:

.. math::
    He_{n + 1}(\text{input}) = \text{input} \times He_{n}(\text{input}) - n \times He_{n - 1}(\text{input})

is evaluated.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

laguerre_polynomial_l = _add_docstr(_special.special_laguerre_polynomial_l,
                                    r"""
laguerre_polynomial_l(input, n, *, out=None) -> Tensor

Laguerre polynomial :math:`L_{n}(\text{input})`.

If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`1 - \text{input}`
is returned. Otherwise, the recursion:

.. math::
    L_{n + 1}(\text{input}) = \frac{(2 \times n + 1 - \text{input}) \times L_{n}(\text{input}) - n \times L_{n - 1}(\text{input})}{n + 1}

is evaluated.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

legendre_polynomial_p = _add_docstr(_special.special_legendre_polynomial_p,
                                    r"""
legendre_polynomial_p(input, n, *, out=None) -> Tensor

Legendre polynomial :math:`P_{n}(\text{input})`.

If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
is returned. Otherwise, the recursion:

.. math::
    P_{n + 1}(\text{input}) = \frac{(2 \times n + 1) \times \text{input} \times P_{n}(\text{input}) - n \times P_{n - 1}(\text{input})}{n + 1}

is evaluated.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

modified_bessel_i0 = _add_docstr(_special.special_modified_bessel_i0,
                                 r"""
modified_bessel_i0(input, *, out=None) -> Tensor

Modified Bessel function of the first kind of order :math:`0`.

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

modified_bessel_i1 = _add_docstr(_special.special_modified_bessel_i1,
                                 r"""
modified_bessel_i1(input, *, out=None) -> Tensor

Modified Bessel function of the first kind of order :math:`1`.

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

modified_bessel_k0 = _add_docstr(_special.special_modified_bessel_k0,
                                 r"""
modified_bessel_k0(input, *, out=None) -> Tensor

Modified Bessel function of the second kind of order :math:`0`.
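
For :math:`\text{input} > 0`, it admits the integral representation

.. math::
    K_{0}(\text{input}) = \int_{0}^{\infty} e^{-\text{input} \, \cosh t} \, dt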

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

modified_bessel_k1 = _add_docstr(_special.special_modified_bessel_k1,
                                 r"""
modified_bessel_k1(input, *, out=None) -> Tensor

Modified Bessel function of the second kind of order :math:`1`.

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

scaled_modified_bessel_k0 = _add_docstr(_special.special_scaled_modified_bessel_k0,
                                        r"""
scaled_modified_bessel_k0(input, *, out=None) -> Tensor

Scaled modified Bessel function of the second kind of order :math:`0`.

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

scaled_modified_bessel_k1 = _add_docstr(_special.special_scaled_modified_bessel_k1,
                                        r"""
scaled_modified_bessel_k1(input, *, out=None) -> Tensor

Scaled modified Bessel function of the second kind of order :math:`1`.

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))

shifted_chebyshev_polynomial_t = _add_docstr(_special.special_shifted_chebyshev_polynomial_t,
                                             r"""
shifted_chebyshev_polynomial_t(input, n, *, out=None) -> Tensor

Shifted Chebyshev polynomial of the first kind :math:`T_{n}^{\ast}(\text{input})`.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

shifted_chebyshev_polynomial_u = _add_docstr(_special.special_shifted_chebyshev_polynomial_u,
                                             r"""
shifted_chebyshev_polynomial_u(input, n, *, out=None) -> Tensor

Shifted Chebyshev polynomial of the second kind :math:`U_{n}^{\ast}(\text{input})`.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

shifted_chebyshev_polynomial_v = _add_docstr(_special.special_shifted_chebyshev_polynomial_v,
                                             r"""
shifted_chebyshev_polynomial_v(input, n, *, out=None) -> Tensor

Shifted Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

shifted_chebyshev_polynomial_w = _add_docstr(_special.special_shifted_chebyshev_polynomial_w,
                                             r"""
shifted_chebyshev_polynomial_w(input, n, *, out=None) -> Tensor

Shifted Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`.

""" + r"""
Args:
    {input}
    n (Tensor): Degree of the polynomial.

Keyword args:
    {out}
""".format(**common_args))

spherical_bessel_j0 = _add_docstr(_special.special_spherical_bessel_j0,
                                  r"""
spherical_bessel_j0(input, *, out=None) -> Tensor

Spherical Bessel function of the first kind of order :math:`0`.

""" + r"""
Args:
    {input}

Keyword args:
    {out}
""".format(**common_args))
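

# The block below is a small, self-contained usage sketch of a few of the
# functions documented above. It is illustrative only, not part of the public
# torch.special API surface, and is guarded so it never runs when this module
# is imported.
if __name__ == "__main__":
    x = torch.linspace(0.1, 0.9, steps=5)

    # logit and expit are mathematical inverses of each other on (0, 1).
    print(torch.special.expit(torch.special.logit(x)))  # approximately x

    # ndtr maps a value to the standard normal CDF; ndtri inverts it.
    p = torch.special.ndtr(torch.tensor([-1.0, 0.0, 1.0]))
    print(torch.special.ndtri(p))  # approximately tensor([-1., 0., 1.])

    # The regularized incomplete gamma functions sum to one.
    a = torch.tensor([4.0])
    print(torch.special.gammainc(a, x) + torch.special.gammaincc(a, x))  # ones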