1 #pragma once
2
3 #include <c10/util/irange.h>
4 #include <torch/nn/functional/activation.h>
5 #include <torch/nn/modules/utils.h>
6 #include <torch/nn/options/pooling.h>
7
8 namespace torch {
9 namespace nn {
10 namespace functional {
11
12 #ifndef DOXYGEN_SHOULD_SKIP_THIS
13 namespace detail {
// Thin shim over the ATen avg_pool1d kernel; backs the public overload.
inline Tensor avg_pool1d(
    const Tensor& input,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    ExpandingArray<1> padding,
    bool ceil_mode,
    bool count_include_pad) {
  return torch::avg_pool1d(
      input,
      kernel_size,
      stride,
      padding,
      ceil_mode,
      count_include_pad);
}
24 } // namespace detail
25 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
26
27 /// See
28 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.avg_pool1d
29 /// about the exact behavior of this functional.
30 ///
31 /// See the documentation for `torch::nn::functional::AvgPool1dFuncOptions`
32 /// class to learn what optional arguments are supported for this functional.
33 ///
34 /// Example:
35 /// ```
36 /// namespace F = torch::nn::functional;
37 /// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
38 /// ```
inline Tensor avg_pool1d(
    const Tensor& input,
    const AvgPool1dFuncOptions& options) {
  // Qualify the detail:: helper explicitly, matching every other functional
  // wrapper in this header. The previous unqualified call could not find
  // detail::avg_pool1d by ordinary lookup and only resolved through ADL to
  // the ATen function — same behavior, but fragile and inconsistent.
  return detail::avg_pool1d(
      input,
      options.kernel_size(),
      options.stride(),
      options.padding(),
      options.ceil_mode(),
      options.count_include_pad());
}
50
51 #ifndef DOXYGEN_SHOULD_SKIP_THIS
52 namespace detail {
// Thin shim over the ATen avg_pool2d kernel; backs the public overload.
inline Tensor avg_pool2d(
    const Tensor& input,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> stride,
    ExpandingArray<2> padding,
    bool ceil_mode,
    bool count_include_pad,
    std::optional<int64_t> divisor_override) {
  return torch::avg_pool2d(
      input, kernel_size, stride, padding, ceil_mode, count_include_pad,
      divisor_override);
}
70 } // namespace detail
71 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
72
73 /// See
74 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.avg_pool2d
75 /// about the exact behavior of this functional.
76 ///
77 /// See the documentation for `torch::nn::functional::AvgPool2dFuncOptions`
78 /// class to learn what optional arguments are supported for this functional.
79 ///
80 /// Example:
81 /// ```
82 /// namespace F = torch::nn::functional;
83 /// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
84 /// ```
inline Tensor avg_pool2d(
    const Tensor& input,
    const AvgPool2dFuncOptions& options) {
  // Unpack the option struct and delegate to the detail:: implementation.
  return detail::avg_pool2d(
      input, options.kernel_size(), options.stride(), options.padding(),
      options.ceil_mode(), options.count_include_pad(),
      options.divisor_override());
}
97
98 #ifndef DOXYGEN_SHOULD_SKIP_THIS
99 namespace detail {
// Thin shim over the ATen avg_pool3d kernel; backs the public overload.
inline Tensor avg_pool3d(
    const Tensor& input,
    ExpandingArray<3> kernel_size,
    ExpandingArray<3> stride,
    ExpandingArray<3> padding,
    bool ceil_mode,
    bool count_include_pad,
    std::optional<int64_t> divisor_override) {
  return torch::avg_pool3d(
      input, kernel_size, stride, padding, ceil_mode, count_include_pad,
      divisor_override);
}
117 } // namespace detail
118 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
119
120 /// See
121 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.avg_pool3d
122 /// about the exact behavior of this functional.
123 ///
124 /// See the documentation for `torch::nn::functional::AvgPool3dFuncOptions`
125 /// class to learn what optional arguments are supported for this functional.
126 ///
127 /// Example:
128 /// ```
129 /// namespace F = torch::nn::functional;
130 /// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2));
131 /// ```
inline Tensor avg_pool3d(
    const Tensor& input,
    const AvgPool3dFuncOptions& options) {
  // Unpack the option struct and delegate to the detail:: implementation.
  return detail::avg_pool3d(
      input, options.kernel_size(), options.stride(), options.padding(),
      options.ceil_mode(), options.count_include_pad(),
      options.divisor_override());
}
144
145 // ============================================================================
146
147 #ifndef DOXYGEN_SHOULD_SKIP_THIS
148 namespace detail {
// Thin shim over the ATen max_pool1d kernel; backs the public overload.
inline Tensor max_pool1d(
    const Tensor& input,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    ExpandingArray<1> padding,
    ExpandingArray<1> dilation,
    bool ceil_mode) {
  return torch::max_pool1d(
      input,
      kernel_size,
      stride,
      padding,
      dilation,
      ceil_mode);
}
159 } // namespace detail
160 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
161
162 /// See
163 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_pool1d
164 /// about the exact behavior of this functional.
165 ///
166 /// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions`
167 /// class to learn what optional arguments are supported for this functional.
168 ///
169 /// Example:
170 /// ```
171 /// namespace F = torch::nn::functional;
172 /// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2));
173 /// ```
inline Tensor max_pool1d(
    const Tensor& input,
    const MaxPool1dFuncOptions& options) {
  // Unpack the option struct and delegate to the detail:: implementation.
  return detail::max_pool1d(
      input, options.kernel_size(), options.stride(), options.padding(),
      options.dilation(), options.ceil_mode());
}
185
186 #ifndef DOXYGEN_SHOULD_SKIP_THIS
187 namespace detail {
max_pool1d_with_indices(const Tensor & input,ExpandingArray<1> kernel_size,ExpandingArray<1> stride,ExpandingArray<1> padding,ExpandingArray<1> dilation,bool ceil_mode)188 inline std::tuple<Tensor, Tensor> max_pool1d_with_indices(
189 const Tensor& input,
190 ExpandingArray<1> kernel_size,
191 ExpandingArray<1> stride,
192 ExpandingArray<1> padding,
193 ExpandingArray<1> dilation,
194 bool ceil_mode) {
195 return torch::max_pool1d_with_indices(
196 input, kernel_size, stride, padding, dilation, ceil_mode);
197 }
198 } // namespace detail
199 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
200
201 /// See the documentation for `torch::nn::functional::MaxPool1dFuncOptions`
202 /// class to learn what optional arguments are supported for this functional.
203 ///
204 /// Example:
205 /// ```
206 /// namespace F = torch::nn::functional;
207 /// F::max_pool1d_with_indices(x, F::MaxPool1dFuncOptions(3).stride(2));
208 /// ```
max_pool1d_with_indices(const Tensor & input,const MaxPool1dFuncOptions & options)209 inline std::tuple<Tensor, Tensor> max_pool1d_with_indices(
210 const Tensor& input,
211 const MaxPool1dFuncOptions& options) {
212 return detail::max_pool1d_with_indices(
213 input,
214 options.kernel_size(),
215 options.stride(),
216 options.padding(),
217 options.dilation(),
218 options.ceil_mode());
219 }
220
221 #ifndef DOXYGEN_SHOULD_SKIP_THIS
222 namespace detail {
// Thin shim over the ATen max_pool2d kernel; backs the public overload.
inline Tensor max_pool2d(
    const Tensor& input,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> stride,
    ExpandingArray<2> padding,
    ExpandingArray<2> dilation,
    bool ceil_mode) {
  return torch::max_pool2d(
      input,
      kernel_size,
      stride,
      padding,
      dilation,
      ceil_mode);
}
233 } // namespace detail
234 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
235
236 /// See
237 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_pool2d
238 /// about the exact behavior of this functional.
239 ///
240 /// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions`
241 /// class to learn what optional arguments are supported for this functional.
242 ///
243 /// Example:
244 /// ```
245 /// namespace F = torch::nn::functional;
246 /// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2));
247 /// ```
inline Tensor max_pool2d(
    const Tensor& input,
    const MaxPool2dFuncOptions& options) {
  // Unpack the option struct and delegate to the detail:: implementation.
  return detail::max_pool2d(
      input, options.kernel_size(), options.stride(), options.padding(),
      options.dilation(), options.ceil_mode());
}
259
260 #ifndef DOXYGEN_SHOULD_SKIP_THIS
261 namespace detail {
max_pool2d_with_indices(const Tensor & input,ExpandingArray<2> kernel_size,ExpandingArray<2> stride,ExpandingArray<2> padding,ExpandingArray<2> dilation,bool ceil_mode)262 inline std::tuple<Tensor, Tensor> max_pool2d_with_indices(
263 const Tensor& input,
264 ExpandingArray<2> kernel_size,
265 ExpandingArray<2> stride,
266 ExpandingArray<2> padding,
267 ExpandingArray<2> dilation,
268 bool ceil_mode) {
269 return torch::max_pool2d_with_indices(
270 input, kernel_size, stride, padding, dilation, ceil_mode);
271 }
272 } // namespace detail
273 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
274
275 /// See the documentation for `torch::nn::functional::MaxPool2dFuncOptions`
276 /// class to learn what optional arguments are supported for this functional.
277 ///
278 /// Example:
279 /// ```
280 /// namespace F = torch::nn::functional;
281 /// F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(3).stride(2));
282 /// ```
max_pool2d_with_indices(const Tensor & input,const MaxPool2dFuncOptions & options)283 inline std::tuple<Tensor, Tensor> max_pool2d_with_indices(
284 const Tensor& input,
285 const MaxPool2dFuncOptions& options) {
286 return detail::max_pool2d_with_indices(
287 input,
288 options.kernel_size(),
289 options.stride(),
290 options.padding(),
291 options.dilation(),
292 options.ceil_mode());
293 }
294
295 #ifndef DOXYGEN_SHOULD_SKIP_THIS
296 namespace detail {
// Thin shim over the ATen max_pool3d kernel; backs the public overload.
inline Tensor max_pool3d(
    const Tensor& input,
    ExpandingArray<3> kernel_size,
    ExpandingArray<3> stride,
    ExpandingArray<3> padding,
    ExpandingArray<3> dilation,
    bool ceil_mode) {
  return torch::max_pool3d(
      input,
      kernel_size,
      stride,
      padding,
      dilation,
      ceil_mode);
}
307 } // namespace detail
308 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
309
310 /// See
311 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_pool3d
312 /// about the exact behavior of this functional.
313 ///
314 /// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions`
315 /// class to learn what optional arguments are supported for this functional.
316 ///
317 /// Example:
318 /// ```
319 /// namespace F = torch::nn::functional;
320 /// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2));
321 /// ```
inline Tensor max_pool3d(
    const Tensor& input,
    const MaxPool3dFuncOptions& options) {
  // Unpack the option struct and delegate to the detail:: implementation.
  return detail::max_pool3d(
      input, options.kernel_size(), options.stride(), options.padding(),
      options.dilation(), options.ceil_mode());
}
333
334 #ifndef DOXYGEN_SHOULD_SKIP_THIS
335 namespace detail {
max_pool3d_with_indices(const Tensor & input,ExpandingArray<3> kernel_size,ExpandingArray<3> stride,ExpandingArray<3> padding,ExpandingArray<3> dilation,bool ceil_mode)336 inline std::tuple<Tensor, Tensor> max_pool3d_with_indices(
337 const Tensor& input,
338 ExpandingArray<3> kernel_size,
339 ExpandingArray<3> stride,
340 ExpandingArray<3> padding,
341 ExpandingArray<3> dilation,
342 bool ceil_mode) {
343 return torch::max_pool3d_with_indices(
344 input, kernel_size, stride, padding, dilation, ceil_mode);
345 }
346 } // namespace detail
347 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
348
349 /// See the documentation for `torch::nn::functional::MaxPool3dFuncOptions`
350 /// class to learn what optional arguments are supported for this functional.
351 ///
352 /// Example:
353 /// ```
354 /// namespace F = torch::nn::functional;
355 /// F::max_pool3d_with_indices(x, F::MaxPool3dFuncOptions(3).stride(2));
356 /// ```
max_pool3d_with_indices(const Tensor & input,const MaxPool3dFuncOptions & options)357 inline std::tuple<Tensor, Tensor> max_pool3d_with_indices(
358 const Tensor& input,
359 const MaxPool3dFuncOptions& options) {
360 return detail::max_pool3d_with_indices(
361 input,
362 options.kernel_size(),
363 options.stride(),
364 options.padding(),
365 options.dilation(),
366 options.ceil_mode());
367 }
368
369 // ============================================================================
370
371 #ifndef DOXYGEN_SHOULD_SKIP_THIS
372 namespace detail {
adaptive_max_pool1d_with_indices(const Tensor & input,ExpandingArray<1> output_size)373 inline std::tuple<Tensor, Tensor> adaptive_max_pool1d_with_indices(
374 const Tensor& input,
375 ExpandingArray<1> output_size) {
376 return torch::adaptive_max_pool1d(input, output_size);
377 }
378 } // namespace detail
379
380 /// See the documentation for
381 /// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what
382 /// optional arguments are supported for this functional.
383 ///
384 /// Example:
385 /// ```
386 /// namespace F = torch::nn::functional;
387 /// F::adaptive_max_pool1d_with_indices(x, F::AdaptiveMaxPool1dFuncOptions(3));
388 /// ```
adaptive_max_pool1d_with_indices(const Tensor & input,const AdaptiveMaxPool1dFuncOptions & options)389 inline std::tuple<Tensor, Tensor> adaptive_max_pool1d_with_indices(
390 const Tensor& input,
391 const AdaptiveMaxPool1dFuncOptions& options) {
392 return detail::adaptive_max_pool1d_with_indices(input, options.output_size());
393 }
394
395 namespace detail {
// Values-only variant: run the with-indices kernel and drop the indices.
inline Tensor adaptive_max_pool1d(
    const Tensor& input,
    ExpandingArray<1> output_size) {
  auto values_and_indices =
      adaptive_max_pool1d_with_indices(input, output_size);
  return std::get<0>(std::move(values_and_indices));
}
401 } // namespace detail
402 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
403
404 /// See
405 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_max_pool1d
406 /// about the exact behavior of this functional.
407 ///
408 /// See the documentation for
409 /// `torch::nn::functional::AdaptiveMaxPool1dFuncOptions` class to learn what
410 /// optional arguments are supported for this functional.
411 ///
412 /// Example:
413 /// ```
414 /// namespace F = torch::nn::functional;
415 /// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
416 /// ```
inline Tensor adaptive_max_pool1d(
    const Tensor& input,
    const AdaptiveMaxPool1dFuncOptions& options) {
  // Pull the configured output size out of the options and delegate.
  const auto output_size = options.output_size();
  return detail::adaptive_max_pool1d(input, output_size);
}
422
423 #ifndef DOXYGEN_SHOULD_SKIP_THIS
424 namespace detail {
adaptive_max_pool2d_with_indices(const Tensor & input,ExpandingArrayWithOptionalElem<2> output_size)425 inline std::tuple<Tensor, Tensor> adaptive_max_pool2d_with_indices(
426 const Tensor& input,
427 ExpandingArrayWithOptionalElem<2> output_size) {
428 auto output_size_ =
429 torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
430 return torch::adaptive_max_pool2d(input, output_size_);
431 }
432 } // namespace detail
433 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
434
435 /// See the documentation for
436 /// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what
437 /// optional arguments are supported for this functional.
438 ///
439 /// Example:
440 /// ```
441 /// namespace F = torch::nn::functional;
442 /// F::adaptive_max_pool2d_with_indices(x, F::AdaptiveMaxPool2dFuncOptions(3));
443 /// ```
adaptive_max_pool2d_with_indices(const Tensor & input,const AdaptiveMaxPool2dFuncOptions & options)444 inline std::tuple<Tensor, Tensor> adaptive_max_pool2d_with_indices(
445 const Tensor& input,
446 const AdaptiveMaxPool2dFuncOptions& options) {
447 return detail::adaptive_max_pool2d_with_indices(input, options.output_size());
448 }
449
450 #ifndef DOXYGEN_SHOULD_SKIP_THIS
451 namespace detail {
// Values-only variant: run the with-indices kernel and drop the indices.
inline Tensor adaptive_max_pool2d(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<2> output_size) {
  auto values_and_indices =
      adaptive_max_pool2d_with_indices(input, output_size);
  return std::get<0>(std::move(values_and_indices));
}
457 } // namespace detail
458 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
459
460 /// See
461 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_max_pool2d
462 /// about the exact behavior of this functional.
463 ///
464 /// See the documentation for
465 /// `torch::nn::functional::AdaptiveMaxPool2dFuncOptions` class to learn what
466 /// optional arguments are supported for this functional.
467 ///
468 /// Example:
469 /// ```
470 /// namespace F = torch::nn::functional;
471 /// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3));
472 /// ```
inline Tensor adaptive_max_pool2d(
    const Tensor& input,
    const AdaptiveMaxPool2dFuncOptions& options) {
  // Pull the configured output size out of the options and delegate.
  const auto output_size = options.output_size();
  return detail::adaptive_max_pool2d(input, output_size);
}
478
479 #ifndef DOXYGEN_SHOULD_SKIP_THIS
480 namespace detail {
adaptive_max_pool3d_with_indices(const Tensor & input,ExpandingArrayWithOptionalElem<3> output_size)481 inline std::tuple<Tensor, Tensor> adaptive_max_pool3d_with_indices(
482 const Tensor& input,
483 ExpandingArrayWithOptionalElem<3> output_size) {
484 auto output_size_ =
485 torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
486 return torch::adaptive_max_pool3d(input, output_size_);
487 }
488 } // namespace detail
489 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
490
491 /// See the documentation for
492 /// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what
493 /// optional arguments are supported for this functional.
494 ///
495 /// Example:
496 /// ```
497 /// namespace F = torch::nn::functional;
498 /// F::adaptive_max_pool3d_with_indices(x, F::AdaptiveMaxPool3dFuncOptions(3));
499 /// ```
adaptive_max_pool3d_with_indices(const Tensor & input,const AdaptiveMaxPool3dFuncOptions & options)500 inline std::tuple<Tensor, Tensor> adaptive_max_pool3d_with_indices(
501 const Tensor& input,
502 const AdaptiveMaxPool3dFuncOptions& options) {
503 return detail::adaptive_max_pool3d_with_indices(input, options.output_size());
504 }
505
506 #ifndef DOXYGEN_SHOULD_SKIP_THIS
507 namespace detail {
// Values-only variant: run the with-indices kernel and drop the indices.
inline Tensor adaptive_max_pool3d(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<3> output_size) {
  auto values_and_indices =
      adaptive_max_pool3d_with_indices(input, output_size);
  return std::get<0>(std::move(values_and_indices));
}
513 } // namespace detail
514 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
515
516 /// See
517 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_max_pool3d
518 /// about the exact behavior of this functional.
519 ///
520 /// See the documentation for
521 /// `torch::nn::functional::AdaptiveMaxPool3dFuncOptions` class to learn what
522 /// optional arguments are supported for this functional.
523 ///
524 /// Example:
525 /// ```
526 /// namespace F = torch::nn::functional;
527 /// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3));
528 /// ```
inline Tensor adaptive_max_pool3d(
    const Tensor& input,
    const AdaptiveMaxPool3dFuncOptions& options) {
  // Pull the configured output size out of the options and delegate.
  const auto output_size = options.output_size();
  return detail::adaptive_max_pool3d(input, output_size);
}
534
535 // ============================================================================
536
537 #ifndef DOXYGEN_SHOULD_SKIP_THIS
538 namespace detail {
// Thin shim over the ATen adaptive_avg_pool1d kernel.
inline Tensor adaptive_avg_pool1d(
    const Tensor& input,
    ExpandingArray<1> output_size) {
  return torch::adaptive_avg_pool1d(input, output_size);
}
544 } // namespace detail
545 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
546
547 /// See
548 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_avg_pool1d
549 /// about the exact behavior of this functional.
550 ///
551 /// See the documentation for
552 /// `torch::nn::functional::AdaptiveAvgPool1dFuncOptions` class to learn what
553 /// optional arguments are supported for this functional.
554 ///
555 /// Example:
556 /// ```
557 /// namespace F = torch::nn::functional;
558 /// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3));
559 /// ```
inline Tensor adaptive_avg_pool1d(
    const Tensor& input,
    const AdaptiveAvgPool1dFuncOptions& options) {
  // Pull the configured output size out of the options and delegate.
  const auto output_size = options.output_size();
  return detail::adaptive_avg_pool1d(input, output_size);
}
565
566 #ifndef DOXYGEN_SHOULD_SKIP_THIS
567 namespace detail {
inline Tensor adaptive_avg_pool2d(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<2> output_size) {
  // Fill any unspecified output dimensions from the input's trailing sizes,
  // then invoke the ATen kernel.
  const auto full_output_size =
      torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
  return torch::adaptive_avg_pool2d(input, full_output_size);
}
575 } // namespace detail
576 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
577
578 /// See
579 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_avg_pool2d
580 /// about the exact behavior of this functional.
581 ///
582 /// See the documentation for
583 /// `torch::nn::functional::AdaptiveAvgPool2dFuncOptions` class to learn what
584 /// optional arguments are supported for this functional.
585 ///
586 /// Example:
587 /// ```
588 /// namespace F = torch::nn::functional;
589 /// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
590 /// ```
inline Tensor adaptive_avg_pool2d(
    const Tensor& input,
    const AdaptiveAvgPool2dFuncOptions& options) {
  // Pull the configured output size out of the options and delegate.
  const auto output_size = options.output_size();
  return detail::adaptive_avg_pool2d(input, output_size);
}
596
597 #ifndef DOXYGEN_SHOULD_SKIP_THIS
598 namespace detail {
inline Tensor adaptive_avg_pool3d(
    const Tensor& input,
    ExpandingArrayWithOptionalElem<3> output_size) {
  // Fill any unspecified output dimensions from the input's trailing sizes,
  // then invoke the ATen kernel.
  const auto full_output_size =
      torch::nn::modules::utils::_list_with_default(output_size, input.sizes());
  return torch::adaptive_avg_pool3d(input, full_output_size);
}
606 } // namespace detail
607 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
608
609 /// See
610 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_avg_pool3d
611 /// about the exact behavior of this functional.
612 ///
613 /// See the documentation for
614 /// `torch::nn::functional::AdaptiveAvgPool3dFuncOptions` class to learn what
615 /// optional arguments are supported for this functional.
616 ///
617 /// Example:
618 /// ```
619 /// namespace F = torch::nn::functional;
620 /// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
621 /// ```
inline Tensor adaptive_avg_pool3d(
    const Tensor& input,
    const AdaptiveAvgPool3dFuncOptions& options) {
  // Pull the configured output size out of the options and delegate.
  const auto output_size = options.output_size();
  return detail::adaptive_avg_pool3d(input, output_size);
}
627
628 // ============================================================================
629
_unpool_output_size(const Tensor & input,const IntArrayRef & kernel_size,const IntArrayRef & stride,const IntArrayRef & padding,const std::optional<std::vector<int64_t>> & output_size)630 inline std::vector<int64_t> _unpool_output_size(
631 const Tensor& input,
632 const IntArrayRef& kernel_size,
633 const IntArrayRef& stride,
634 const IntArrayRef& padding,
635 const std::optional<std::vector<int64_t>>& output_size) {
636 auto input_size = input.sizes();
637 std::vector<int64_t> default_size;
638 for (const auto d : c10::irange(kernel_size.size())) {
639 default_size.push_back(
640 (input_size[input_size.size() - kernel_size.size() + d] - 1) *
641 stride[d] +
642 kernel_size[d] - 2 * padding[d]);
643 }
644 if (!output_size) {
645 return default_size;
646 } else {
647 std::vector<int64_t> output_size_;
648 if (output_size->size() == kernel_size.size() + 2) {
649 output_size_ = IntArrayRef(*output_size).slice(2).vec();
650 }
651 if (output_size_.size() != kernel_size.size()) {
652 TORCH_CHECK(
653 false,
654 "output_size should be a sequence containing ",
655 kernel_size.size(),
656 " or ",
657 kernel_size.size() + 2,
658 " elements, but it has a length of '",
659 output_size_.size(),
660 "'");
661 }
662 for (const auto d : c10::irange(kernel_size.size())) {
663 const auto min_size = default_size[d] - stride[d];
664 const auto max_size = default_size[d] + stride[d];
665 if (!(min_size <= output_size_[d] && output_size_[d] <= max_size)) {
666 TORCH_CHECK(
667 false,
668 "invalid output_size ",
669 output_size_,
670 " (dim ",
671 d,
672 " must be between ",
673 min_size,
674 " and ",
675 max_size,
676 ")");
677 }
678 }
679 return output_size_;
680 }
681 }
682
683 #ifndef DOXYGEN_SHOULD_SKIP_THIS
684 namespace detail {
inline Tensor max_unpool1d(
    const Tensor& input,
    const Tensor& indices,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    ExpandingArray<1> padding,
    const std::optional<std::vector<int64_t>>& output_size) {
  // There is no dedicated 1-d unpool kernel: append a width-1 dimension,
  // run the 2-d kernel, then squeeze the extra dimension back out.
  auto output_size_ =
      _unpool_output_size(input, kernel_size, stride, padding, output_size);
  output_size_.push_back(1);
  const auto unpooled = torch::max_unpool2d(
      input.unsqueeze(-1), indices.unsqueeze(-1), output_size_);
  return unpooled.squeeze(-1);
}
699 } // namespace detail
700 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
701
702 /// See
703 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_unpool1d
704 /// about the exact behavior of this functional.
705 ///
706 /// See the documentation for `torch::nn::functional::MaxUnpool1dFuncOptions`
707 /// class to learn what optional arguments are supported for this functional.
708 ///
709 /// Example:
710 /// ```
711 /// namespace F = torch::nn::functional;
712 /// F::max_unpool1d(x, indices,
713 /// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1));
714 /// ```
inline Tensor max_unpool1d(
    const Tensor& input,
    const Tensor& indices,
    const MaxUnpool1dFuncOptions& options) {
  // Unpack the option struct and delegate to the detail:: implementation.
  return detail::max_unpool1d(
      input, indices, options.kernel_size(), options.stride(),
      options.padding(), options.output_size());
}
727
728 #ifndef DOXYGEN_SHOULD_SKIP_THIS
729 namespace detail {
inline Tensor max_unpool2d(
    const Tensor& input,
    const Tensor& indices,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> stride,
    ExpandingArray<2> padding,
    const std::optional<std::vector<int64_t>>& output_size) {
  // Resolve (and validate) the target spatial size, then unpool.
  const auto resolved_size =
      _unpool_output_size(input, kernel_size, stride, padding, output_size);
  return torch::max_unpool2d(input, indices, resolved_size);
}
742 } // namespace detail
743 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
744
745 /// See
746 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_unpool2d
747 /// about the exact behavior of this functional.
748 ///
749 /// See the documentation for `torch::nn::functional::MaxUnpool2dFuncOptions`
750 /// class to learn what optional arguments are supported for this functional.
751 ///
752 /// Example:
753 /// ```
754 /// namespace F = torch::nn::functional;
755 /// F::max_unpool2d(x, indices,
756 /// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1));
757 /// ```
inline Tensor max_unpool2d(
    const Tensor& input,
    const Tensor& indices,
    const MaxUnpool2dFuncOptions& options) {
  // Unpack the option struct and delegate to the detail:: implementation.
  return detail::max_unpool2d(
      input, indices, options.kernel_size(), options.stride(),
      options.padding(), options.output_size());
}
770
771 #ifndef DOXYGEN_SHOULD_SKIP_THIS
772 namespace detail {
inline Tensor max_unpool3d(
    const Tensor& input,
    const Tensor& indices,
    ExpandingArray<3> kernel_size,
    ExpandingArray<3> stride,
    ExpandingArray<3> padding,
    const std::optional<std::vector<int64_t>>& output_size) {
  // Resolve (and validate) the target spatial size, then unpool. Unlike the
  // 2-d ATen kernel, the 3-d one also takes stride and padding.
  const auto resolved_size =
      _unpool_output_size(input, kernel_size, stride, padding, output_size);
  return torch::max_unpool3d(input, indices, resolved_size, stride, padding);
}
785 } // namespace detail
786 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
787
788 /// See
789 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.max_unpool3d
790 /// about the exact behavior of this functional.
791 ///
792 /// See the documentation for `torch::nn::functional::MaxUnpool3dFuncOptions`
793 /// class to learn what optional arguments are supported for this functional.
794 ///
795 /// Example:
796 /// ```
797 /// namespace F = torch::nn::functional;
798 /// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
799 /// ```
inline Tensor max_unpool3d(
    const Tensor& input,
    const Tensor& indices,
    const MaxUnpool3dFuncOptions& options) {
  // Unpack the option struct and delegate to the detail:: implementation.
  return detail::max_unpool3d(
      input, indices, options.kernel_size(), options.stride(),
      options.padding(), options.output_size());
}
812
813 // ============================================================================
814
815 #ifndef DOXYGEN_SHOULD_SKIP_THIS
816 namespace detail {
// Backs F::fractional_max_pool2d_with_indices. Exactly one of `output_size`
// or `output_ratio` must be provided; when only the ratio is given, the
// output size is derived from the input's last two dimensions. If
// `_random_samples` is undefined, uniform samples are generated on the
// input's device/dtype (note: this makes repeated calls nondeterministic
// unless the caller seeds the RNG or supplies samples).
inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
    const Tensor& input,
    const ExpandingArray<2>& kernel_size,
    const std::optional<ExpandingArray<2>>& output_size,
    const std::optional<ExpandingArray<2, double>>& output_ratio,
    const Tensor& _random_samples) {
  // At least one way of determining the output size is required.
  if (output_size == std::nullopt && output_ratio == std::nullopt) {
    TORCH_CHECK(
        false,
        "fractional_max_pool2d requires specifying either ",
        "an output_size or an output_ratio");
  }
  std::optional<ExpandingArray<2>> output_size_ = output_size;
  if (output_size_ == std::nullopt) {
    TORCH_INTERNAL_ASSERT(output_ratio != std::nullopt);
    // Derive the output size from the ratio applied to the input's last two
    // (spatial) dimensions, truncating toward zero.
    output_size_ = {
        (int64_t)(static_cast<double>(input.size(-2)) *
                  (*output_ratio.value())[0]),
        (int64_t)(static_cast<double>(input.size(-1)) *
                  (*output_ratio.value())[1])};
  }

  Tensor _random_samples_ = _random_samples;
  if (!_random_samples_.defined()) {
    // No samples supplied: draw (n_batch, channels, 2) uniform values on the
    // input's dtype/device. A 3-d input is treated as unbatched.
    auto n_batch = input.dim() == 3 ? 1 : input.size(0);
    _random_samples_ = torch::rand(
        {n_batch, input.size(-3), 2},
        torch::TensorOptions().dtype(input.dtype()).device(input.device()));
  }
  return torch::fractional_max_pool2d(
      input, kernel_size, *output_size_, _random_samples_);
}
849 } // namespace detail
850 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
851
852 /// See the documentation for
853 /// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what
854 /// optional arguments are supported for this functional.
855 ///
856 /// Example:
857 /// ```
858 /// namespace F = torch::nn::functional;
859 /// F::fractional_max_pool2d_with_indices(x,
860 /// F::FractionalMaxPool2dFuncOptions(3).output_size(2));
861 /// ```
fractional_max_pool2d_with_indices(const Tensor & input,const FractionalMaxPool2dFuncOptions & options)862 inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
863 const Tensor& input,
864 const FractionalMaxPool2dFuncOptions& options) {
865 return detail::fractional_max_pool2d_with_indices(
866 input,
867 options.kernel_size(),
868 options.output_size(),
869 options.output_ratio(),
870 options._random_samples());
871 }
872
873 #ifndef DOXYGEN_SHOULD_SKIP_THIS
874 namespace detail {
inline Tensor fractional_max_pool2d(
    const Tensor& input,
    ExpandingArray<2> kernel_size,
    std::optional<ExpandingArray<2>> output_size,
    std::optional<ExpandingArray<2, double>> output_ratio,
    const Tensor& _random_samples) {
  // Same computation as the *_with_indices variant; the indices are dropped.
  auto out_and_indices = fractional_max_pool2d_with_indices(
      input, kernel_size, output_size, output_ratio, _random_samples);
  return std::get<0>(out_and_indices);
}
884 } // namespace detail
885 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
886
887 /// See the documentation for
888 /// `torch::nn::functional::FractionalMaxPool2dFuncOptions` class to learn what
889 /// optional arguments are supported for this functional.
890 ///
891 /// Example:
892 /// ```
893 /// namespace F = torch::nn::functional;
894 /// F::fractional_max_pool2d(x,
895 /// F::FractionalMaxPool2dFuncOptions(3).output_size(2));
896 /// ```
inline Tensor fractional_max_pool2d(
    const Tensor& input,
    const FractionalMaxPool2dFuncOptions& options) {
  // Unpack the options object and delegate to the detail implementation.
  const auto& kernel_size = options.kernel_size();
  const auto& output_size = options.output_size();
  const auto& output_ratio = options.output_ratio();
  const auto& random_samples = options._random_samples();
  return detail::fractional_max_pool2d(
      input, kernel_size, output_size, output_ratio, random_samples);
}
907
908 #ifndef DOXYGEN_SHOULD_SKIP_THIS
909 namespace detail {
fractional_max_pool3d_with_indices(const Tensor & input,const ExpandingArray<3> & kernel_size,const std::optional<ExpandingArray<3>> & output_size,const std::optional<ExpandingArray<3,double>> & output_ratio,const Tensor & _random_samples)910 inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices(
911 const Tensor& input,
912 const ExpandingArray<3>& kernel_size,
913 const std::optional<ExpandingArray<3>>& output_size,
914 const std::optional<ExpandingArray<3, double>>& output_ratio,
915 const Tensor& _random_samples) {
916 if (output_size == std::nullopt && output_ratio == std::nullopt) {
917 TORCH_CHECK(
918 false,
919 "fractional_max_pool3d requires specifying either ",
920 "an output_size or an output_ratio");
921 }
922
923 std::optional<ExpandingArray<3>> output_size_ = output_size;
924 if (output_size_ == std::nullopt) {
925 TORCH_INTERNAL_ASSERT(output_ratio != std::nullopt);
926 output_size_ = {
927 (int64_t)(static_cast<double>(input.size(-3)) *
928 (*output_ratio.value())[0]),
929 (int64_t)(static_cast<double>(input.size(-2)) *
930 (*output_ratio.value())[1]),
931 (int64_t)(static_cast<double>(input.size(-1)) *
932 (*output_ratio.value())[2])};
933 }
934
935 Tensor _random_samples_ = _random_samples;
936 if (!_random_samples_.defined()) {
937 auto n_batch = input.dim() == 4 ? 1 : input.size(0);
938 _random_samples_ = torch::rand(
939 {n_batch, input.size(-4), 3},
940 torch::TensorOptions().dtype(input.dtype()).device(input.device()));
941 }
942 return torch::fractional_max_pool3d(
943 input, kernel_size, *output_size_, _random_samples_);
944 }
945 } // namespace detail
946 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
947
948 /// See the documentation for
949 /// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what
950 /// optional arguments are supported for this functional.
951 ///
952 /// Example:
953 /// ```
954 /// namespace F = torch::nn::functional;
955 /// F::fractional_max_pool3d_with_indices(x,
956 /// F::FractionalMaxPool3dFuncOptions(3).output_size(2));
957 /// ```
fractional_max_pool3d_with_indices(const Tensor & input,const FractionalMaxPool3dFuncOptions & options)958 inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices(
959 const Tensor& input,
960 const FractionalMaxPool3dFuncOptions& options) {
961 return detail::fractional_max_pool3d_with_indices(
962 input,
963 options.kernel_size(),
964 options.output_size(),
965 options.output_ratio(),
966 options._random_samples());
967 }
968
969 #ifndef DOXYGEN_SHOULD_SKIP_THIS
970 namespace detail {
inline Tensor fractional_max_pool3d(
    const Tensor& input,
    ExpandingArray<3> kernel_size,
    std::optional<ExpandingArray<3>> output_size,
    std::optional<ExpandingArray<3, double>> output_ratio,
    const Tensor& _random_samples) {
  // Same computation as the *_with_indices variant; the indices are dropped.
  auto out_and_indices = fractional_max_pool3d_with_indices(
      input, kernel_size, output_size, output_ratio, _random_samples);
  return std::get<0>(out_and_indices);
}
980 } // namespace detail
981 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
982
983 /// See the documentation for
984 /// `torch::nn::functional::FractionalMaxPool3dFuncOptions` class to learn what
985 /// optional arguments are supported for this functional.
986 ///
987 /// Example:
988 /// ```
989 /// namespace F = torch::nn::functional;
990 /// F::fractional_max_pool3d(x,
991 /// F::FractionalMaxPool3dFuncOptions(3).output_size(2));
992 /// ```
inline Tensor fractional_max_pool3d(
    const Tensor& input,
    const FractionalMaxPool3dFuncOptions& options) {
  // Unpack the options object and delegate to the detail implementation.
  const auto& kernel_size = options.kernel_size();
  const auto& output_size = options.output_size();
  const auto& output_ratio = options.output_ratio();
  const auto& random_samples = options._random_samples();
  return detail::fractional_max_pool3d(
      input, kernel_size, output_size, output_ratio, random_samples);
}
1003
1004 // ============================================================================
1005
1006 #ifndef DOXYGEN_SHOULD_SKIP_THIS
1007 namespace detail {
// Applies 1-D power-average (LP) pooling via an average pool over |x|^p.
inline Tensor lp_pool1d(
    const Tensor& input,
    double norm_type,
    ExpandingArray<1> kernel_size,
    ExpandingArray<1> stride,
    bool ceil_mode) {
  // Average-pool the p-th powers of the input (no padding).
  Tensor pooled = detail::avg_pool1d(
      input.pow(norm_type),
      kernel_size,
      stride,
      /*padding=*/0,
      ceil_mode,
      /*count_include_pad=*/true);

  // Multiplying by the window size turns the mean back into a windowed sum,
  // then the 1/p power completes the LP norm.
  const auto window = (*kernel_size)[0];
  Tensor magnitude = torch::sign(pooled) * relu(torch::abs(pooled));
  return magnitude.mul(window).pow(1. / norm_type);
}
1026 } // namespace detail
1027 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
1028
1029 /// See
1030 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.lp_pool1d
1031 /// about the exact behavior of this functional.
1032 ///
1033 /// See the documentation for `torch::nn::functional::LPPool1dFuncOptions` class
1034 /// to learn what optional arguments are supported for this functional.
1035 ///
1036 /// Example:
1037 /// ```
1038 /// namespace F = torch::nn::functional;
1039 /// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
1040 /// ```
inline Tensor lp_pool1d(
    const Tensor& input,
    const LPPool1dFuncOptions& options) {
  // Unpack the options object and delegate to the detail implementation.
  const auto norm_type = options.norm_type();
  const auto& kernel_size = options.kernel_size();
  const auto& stride = options.stride();
  const auto ceil_mode = options.ceil_mode();
  return detail::lp_pool1d(input, norm_type, kernel_size, stride, ceil_mode);
}
1051
1052 #ifndef DOXYGEN_SHOULD_SKIP_THIS
1053 namespace detail {
// Applies 2-D power-average (LP) pooling via an average pool over |x|^p.
inline Tensor lp_pool2d(
    const Tensor& input,
    double norm_type,
    ExpandingArray<2> kernel_size,
    ExpandingArray<2> stride,
    bool ceil_mode) {
  // ExpandingArray elements are int64_t; use int64_t locals to avoid the
  // implicit narrowing to int (and possible int overflow in kw * kh).
  const int64_t kw = (*kernel_size)[0];
  const int64_t kh = (*kernel_size)[1];
  // Average-pool the p-th powers of the input (no padding).
  Tensor out = detail::avg_pool2d(
      input.pow(norm_type),
      kernel_size,
      stride,
      /*padding=*/0,
      ceil_mode,
      /*count_include_pad=*/true,
      /*divisor_override=*/std::nullopt);

  // Multiplying by the window area turns the mean back into a windowed sum,
  // then the 1/p power completes the LP norm.
  // NOTE(review): relu(abs(out)) is already non-negative; kept for parity
  // with the existing implementation — confirm before simplifying.
  return (torch::sign(out) * relu(torch::abs(out)))
      .mul(kw * kh)
      .pow(1. / norm_type);
}
1075 } // namespace detail
1076 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
1077
1078 /// See
1079 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.lp_pool2d
1080 /// about the exact behavior of this functional.
1081 ///
1082 /// See the documentation for `torch::nn::functional::LPPool2dFuncOptions` class
1083 /// to learn what optional arguments are supported for this functional.
1084 ///
1085 /// Example:
1086 /// ```
1087 /// namespace F = torch::nn::functional;
1088 /// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2));
1089 /// ```
inline Tensor lp_pool2d(
    const Tensor& input,
    const LPPool2dFuncOptions& options) {
  // Unpack the options object and delegate to the detail implementation.
  const auto norm_type = options.norm_type();
  const auto& kernel_size = options.kernel_size();
  const auto& stride = options.stride();
  const auto ceil_mode = options.ceil_mode();
  return detail::lp_pool2d(input, norm_type, kernel_size, stride, ceil_mode);
}
1100
1101 #ifndef DOXYGEN_SHOULD_SKIP_THIS
1102 namespace detail {
// Applies 3-D power-average (LP) pooling via an average pool over |x|^p.
inline Tensor lp_pool3d(
    const Tensor& input,
    double norm_type,
    ExpandingArray<3> kernel_size,
    ExpandingArray<3> stride,
    bool ceil_mode) {
  // ExpandingArray elements are int64_t; use int64_t locals to avoid the
  // implicit narrowing to int (and possible int overflow in kd * kw * kh).
  const int64_t kd = (*kernel_size)[0];
  const int64_t kw = (*kernel_size)[1];
  const int64_t kh = (*kernel_size)[2];
  // Average-pool the p-th powers of the input (no padding).
  Tensor out = detail::avg_pool3d(
      input.pow(norm_type),
      kernel_size,
      stride,
      /*padding=*/0,
      ceil_mode,
      /*count_include_pad=*/true,
      /*divisor_override=*/std::nullopt);

  // Multiplying by the window volume turns the mean back into a windowed sum,
  // then the 1/p power completes the LP norm.
  // NOTE(review): relu(abs(out)) is already non-negative; kept for parity
  // with the existing implementation — confirm before simplifying.
  return (torch::sign(out) * relu(torch::abs(out)))
      .mul(kd * kw * kh)
      .pow(1. / norm_type);
}
1125 } // namespace detail
1126 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
1127
1128 /// See
1129 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.lp_pool3d
1130 /// about the exact behavior of this functional.
1131 ///
1132 /// See the documentation for `torch::nn::functional::LPPool3dFuncOptions` class
1133 /// to learn what optional arguments are supported for this functional.
1134 ///
1135 /// Example:
1136 /// ```
1137 /// namespace F = torch::nn::functional;
1138 /// F::lp_pool3d(x, F::LPPool3dFuncOptions(3, {3, 3, 5}).stride(3));
1139 /// ```
inline Tensor lp_pool3d(
    const Tensor& input,
    const LPPool3dFuncOptions& options) {
  // Unpack the options object and delegate to the detail implementation.
  const auto norm_type = options.norm_type();
  const auto& kernel_size = options.kernel_size();
  const auto& stride = options.stride();
  const auto ceil_mode = options.ceil_mode();
  return detail::lp_pool3d(input, norm_type, kernel_size, stride, ceil_mode);
}
1150
1151 } // namespace functional
1152 } // namespace nn
1153 } // namespace torch
1154