nn_flatten() flattens a contiguous range of dimensions into a single dimension. For use with nn_sequential().
Shape
Input: (*, S_start, ..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions, including none.
Output: (*, S_start * ... * S_i * ... * S_end, *).
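As a small sketch of how the flattened range is chosen (assuming start_dim and end_dim arguments with R's 1-based dimension indexing, and an assumed default of start_dim = 2 so the batch dimension is kept, consistent with the example below): dimensions from start_dim through end_dim are collapsed into one, and all other dimensions are left untouched.

if (torch_is_installed()) {
  x <- torch_randn(2, 3, 4, 5)                  # shape (2, 3, 4, 5)

  m1 <- nn_flatten()                            # assumed default start_dim = 2
  m1(x)$shape                                   # (2, 60): dims 2..4 collapsed

  m2 <- nn_flatten(start_dim = 2, end_dim = 3)  # collapse only dims 2..3
  m2(x)$shape                                   # (2, 12, 5)
}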
Examples
if (torch_is_installed()) {
  # flatten a batch of 32 single-channel 5x5 inputs into shape (32, 25)
  input <- torch_randn(32, 1, 5, 5)
  m <- nn_flatten()
  m(input)
}
#> torch_tensor
#> Columns 1 to 10 0.3744 0.0925 -0.7475 1.3269 -0.5882 -0.9101 -0.6279 0.4557 1.3038 -0.1351
#> 0.0440 -0.2138 -0.0278 1.0037 0.0318 0.0934 -0.5032 -0.3149 1.0115 -0.1735
#> 0.7518 0.2354 1.4131 0.8548 -0.1538 1.4043 1.0196 1.1634 2.9539 0.4478
#> 1.0757 1.7619 -0.5264 -0.3285 -0.5814 -0.0089 -0.9595 0.5578 -0.5278 -1.7170
#> 0.5469 -0.1644 1.3332 0.3362 -0.2417 -0.9801 0.4898 -0.8932 0.3508 0.0165
#> -0.5230 0.8418 -0.7395 -0.9457 -0.9293 0.4263 -1.2827 -0.2857 -0.1399 -1.2839
#> 0.5236 -0.3141 -0.3451 1.5628 1.3143 1.3873 0.0493 -0.9291 1.6766 0.2576
#> 0.3648 -0.6445 0.3805 0.3740 0.0244 0.4638 -0.3483 0.5682 1.1927 -1.3094
#> 0.4158 0.9726 0.4795 1.0425 -1.3095 -0.8837 0.5346 2.2874 -0.0492 0.3662
#> -1.0092 0.3766 1.3942 -0.0820 -0.6053 -0.0836 1.4400 -1.0055 0.5876 -0.1000
#> 1.6075 -0.5100 -0.5964 1.0486 -1.0101 0.0618 0.0472 -0.1109 0.3942 0.8128
#> 1.6979 -1.4036 -1.6428 0.6355 0.0120 0.8881 -0.4305 -1.8287 -0.0456 1.0808
#> 0.3322 -0.2066 -0.5081 -0.3836 1.1373 0.5725 0.4196 -1.9220 0.6122 -0.1045
#> 0.8598 -1.8472 0.6940 -0.8287 1.3070 1.2191 0.2685 1.5619 0.0417 0.4880
#> -0.7136 -0.3278 0.0781 0.9123 -1.5790 1.4979 0.7009 -0.0148 -0.3330 -1.0480
#> -0.5638 1.5685 -1.5352 -0.7965 0.5877 -0.3810 0.9705 0.8452 1.7031 -0.3664
#> -0.6798 -1.2743 -0.5399 -0.3281 1.3117 2.1113 1.3778 0.7570 -0.9750 -1.6793
#> -1.5096 -1.1139 2.1695 -1.2531 0.5720 -0.2353 0.4416 -0.4713 -0.6930 0.2072
#> 1.1462 -0.6429 -1.7308 -1.0293 0.4296 -1.0975 -1.0955 0.2649 -0.0580 -0.6128
#> 0.2922 -0.1939 -2.6399 -0.9203 0.3420 0.3659 0.1217 0.0736 0.0993 -1.6958
#> -0.3153 -0.1923 -0.4287 0.6218 0.2573 0.4796 1.4541 0.3598 1.6094 -0.2614
#> -0.2670 -1.2747 1.2125 0.7833 -0.8828 0.7027 0.9628 -0.0778 1.1711 -0.2686
#> -0.5945 -0.0006 -0.7443 -1.9199 -0.0499 -0.4802 -0.6031 -0.0402 0.2481 0.4819
#> 0.4739 -1.6936 -1.8370 0.0686 1.7653 0.0798 0.7916 -1.8765 -0.0145 -0.7405
#> 1.2357 -1.5596 -1.3789 0.2428 -1.8605 -0.5010 0.3907 -1.0441 -0.6823 0.2349
#> 0.0456 0.6355 -0.1976 -0.5344 -0.3145 0.4697 0.7779 -1.3616 -0.2708 -0.8309
#> -0.4043 -0.2614 1.4862 -0.4530 -0.3937 0.8272 -1.6846 0.0224 -0.7566 0.5623
#> 0.8132 1.2623 0.1362 1.1594 0.5356 0.8297 -0.4972 0.0952 0.5013 -0.5851
#> 0.9413 0.3696 0.0327 -1.5787 1.2504 0.8851 1.4210 2.1640 0.2996 0.5636
#> -0.3994 1.0721 -2.6220 0.2867 -0.8387 0.1687 -0.3834 0.3643 -0.7581 -1.3066
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]
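Since the layer is intended for nn_sequential(), here is a minimal sketch of that usage; the nn_linear() layer and its output size are illustrative assumptions, matched to the 1x5x5 input used above.

if (torch_is_installed()) {
  model <- nn_sequential(
    nn_flatten(),        # (N, 1, 5, 5) -> (N, 25)
    nn_linear(25, 10)    # (N, 25) -> (N, 10); output size chosen for illustration
  )
  model(torch_randn(32, 1, 5, 5))$shape  # 32 10
}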