
    JTh-                         S SK Jr  S SKJs  Jr  S SKJr  S SKJ	r	J
r
JrJr  SSKJr  / SQr " S S	\5      r " S
 S\5      r " S S\5      rg)    )OptionalN)Tensor)
_ratio_2_t_ratio_any_t	_size_2_t_size_any_t   )Module)UpsampleUpsamplingNearest2dUpsamplingBilinear2dc                      ^  \ rS rSr% Sr/ SQr\\S'   \\	   \S'   \\
   \S'   \\S'   \\   \S'   \\   \S	'        SS\\	   S\\
   S\S\\   S	\\   SS
4U 4S jjjrS\S\4S jrU 4S jrS\4S jrSrU =r$ )r      a:  Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.

The input data is assumed to be of the form
`minibatch x channels x [optional depth] x [optional height] x width`.
Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor.

The algorithms available for upsampling are nearest neighbor and linear,
bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensors,
respectively.

One can either give a :attr:`scale_factor` or the target output :attr:`size` to
calculate the output size. (You cannot give both, as it is ambiguous)

Args:
    size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
        output spatial sizes
    scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
        multiplier for spatial size. If a tuple, its length has to match the
        number of spatial dimensions of the input.
    mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
        ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
        Default: ``'nearest'``
    align_corners (bool, optional): if ``True``, the corner pixels of the input
        and output tensors are aligned, thus preserving the values at
        those pixels. This only has an effect when :attr:`mode` is
        ``'linear'``, ``'bilinear'``, ``'bicubic'``, or ``'trilinear'``.
        Default: ``False``
    recompute_scale_factor (bool, optional): recompute the scale_factor for use in the
        interpolation calculation. If `recompute_scale_factor` is ``True``, then
        `scale_factor` must be passed in and `scale_factor` is used to compute the
        output `size`. The computed output `size` will be used to infer new scales for
        the interpolation. Note that when `scale_factor` is floating-point, it may differ
        from the recomputed `scale_factor` due to rounding and precision issues.
        If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will
        be used directly for interpolation.
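
For example (an illustrative sketch; only the resulting shape is checked, not the
interpolated values), the target output :attr:`size` can be given instead of a
:attr:`scale_factor`::

    >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
    >>> m = nn.Upsample(size=(3, 5), mode='bilinear')
    >>> m(input).shape
    torch.Size([1, 1, 3, 5])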

Shape:
    - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})`
    - Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})`
      or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where

.. math::
    D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor

.. math::
    H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor

.. math::
    W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor

.. warning::
    With ``align_corners = True``, the linearly interpolating modes
    (`linear`, `bilinear`, `bicubic`, and `trilinear`) don't proportionally
    align the output and input pixels, and thus the output values can depend
    on the input size. This was the default behavior for these modes up to
    version 0.3.1. Since then, the default behavior is
    ``align_corners = False``. See below for concrete examples on how this
    affects the outputs.

.. note::
    If you want downsampling/general resizing, you should use :func:`~nn.functional.interpolate`.
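
    A minimal sketch of such a call (only the resulting shape is shown)::

        >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
        >>> small = torch.nn.functional.interpolate(input, scale_factor=0.5, mode='nearest')
        >>> small.shape
        torch.Size([1, 1, 1, 1])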

Examples::

    >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
    >>> input
    tensor([[[[1., 2.],
              [3., 4.]]]])

    >>> m = nn.Upsample(scale_factor=2, mode='nearest')
    >>> m(input)
    tensor([[[[1., 1., 2., 2.],
              [1., 1., 2., 2.],
              [3., 3., 4., 4.],
              [3., 3., 4., 4.]]]])

    >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles")
    >>> m = nn.Upsample(scale_factor=2, mode='bilinear')  # align_corners=False
    >>> m(input)
    tensor([[[[1.0000, 1.2500, 1.7500, 2.0000],
              [1.5000, 1.7500, 2.2500, 2.5000],
              [2.5000, 2.7500, 3.2500, 3.5000],
              [3.0000, 3.2500, 3.7500, 4.0000]]]])

    >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    >>> m(input)
    tensor([[[[1.0000, 1.3333, 1.6667, 2.0000],
              [1.6667, 2.0000, 2.3333, 2.6667],
              [2.3333, 2.6667, 3.0000, 3.3333],
              [3.0000, 3.3333, 3.6667, 4.0000]]]])

    >>> # Try scaling the same data in a larger tensor
    >>> input_3x3 = torch.zeros(3, 3).view(1, 1, 3, 3)
    >>> input_3x3[:, :, :2, :2].copy_(input)
    tensor([[[[1., 2.],
              [3., 4.]]]])
    >>> input_3x3
    tensor([[[[1., 2., 0.],
              [3., 4., 0.],
              [0., 0., 0.]]]])

    >>> # xdoctest: +IGNORE_WANT("seems to fail when other tests are run in the same session")
    >>> m = nn.Upsample(scale_factor=2, mode='bilinear')  # align_corners=False
    >>> # Notice that the values in the top-left corner are the same as with the small input (except at the boundary)
    >>> m(input_3x3)
    tensor([[[[1.0000, 1.2500, 1.7500, 1.5000, 0.5000, 0.0000],
              [1.5000, 1.7500, 2.2500, 1.8750, 0.6250, 0.0000],
              [2.5000, 2.7500, 3.2500, 2.6250, 0.8750, 0.0000],
              [2.2500, 2.4375, 2.8125, 2.2500, 0.7500, 0.0000],
              [0.7500, 0.8125, 0.9375, 0.7500, 0.2500, 0.0000],
              [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])

    >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    >>> # Notice that the values in the top-left corner have now changed
    >>> m(input_3x3)
    tensor([[[[1.0000, 1.4000, 1.8000, 1.6000, 0.8000, 0.0000],
              [1.8000, 2.2000, 2.6000, 2.2400, 1.1200, 0.0000],
              [2.6000, 3.0000, 3.4000, 2.8800, 1.4400, 0.0000],
              [2.4000, 2.7200, 3.0400, 2.5600, 1.2800, 0.0000],
              [1.2000, 1.3600, 1.5200, 1.2800, 0.6400, 0.0000],
              [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
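
    >>> # A brief illustrative sketch for 1D (temporal) input; only the output
    >>> # shape is checked, since the exact values depend on the mode
    >>> t = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 4)
    >>> nn.Upsample(scale_factor=2, mode='linear')(t).shape
    torch.Size([1, 1, 8])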
)sizescale_factormodealign_cornersnamerecompute_scale_factorr   r   r   r   r   r   Nreturnc                   > [         TU ]  5         [        U 5      R                  U l        Xl        [        U[        5      (       a  [        S U 5       5      U l        OU(       a  [        U5      OS U l        X0l
        X@l        XPl        g )Nc              3   8   #    U  H  n[        U5      v   M     g 7f)N)float).0factors     S/var/www/auris/envauris/lib/python3.13/site-packages/torch/nn/modules/upsampling.py	<genexpr>$Upsample.__init__.<locals>.<genexpr>   s     %O,eFmm,s   )super__init__type__name__r   r   
isinstancetupler   r   r   r   r   )selfr   r   r   r   r   	__class__s         r   r    Upsample.__init__   sh     	J''		lE** %%O,%O OD7Cl 3D	*&<#    inputc           	          [         R                  " UU R                  U R                  U R                  U R
                  U R                  S9$ )N)r   )Finterpolater   r   r   r   r   )r%   r)   s     r   forwardUpsample.forward   s?    }}IIII#'#>#>
 	
r(   c                 :   > SU;  a  SUS'   [         TU ]  U5        g )Nr   T)r   __setstate__)r%   stater&   s     r   r0   Upsample.__setstate__   s#    #50.2E*+U#r(   c                     U R                   b  S[        U R                   5      -   nOS[        U R                  5      -   nUS[        U R                  5      -   -  nU$ )Nzscale_factor=zsize=z, mode=)r   reprr   r   )r%   infos     r   
extra_reprUpsample.extra_repr   sQ    ("T$*;*;%<<DT$))_,D	DO++r(   )r   r   r   r   r   r   )NNnearestNN)r"   
__module____qualname____firstlineno____doc____constants__str__annotations__r   r   r   boolr    r   r-   r0   r6   __static_attributes____classcell__r&   s   @r   r   r      s    xtM I
;
<((
ID>!$TN* '+/3(,15={#= |,= 	=
  ~= !)= 
= =&
V 
 
$C  r(   r   c                   P   ^  \ rS rSrSr  S	S\\   S\\   SS4U 4S jjjrSr	U =r
$ )
r      a  Applies a 2D nearest neighbor upsampling to an input signal composed of several input channels.

To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
as its constructor argument.

When :attr:`size` is given, it is the output size of the image `(h, w)`.

Args:
    size (int or Tuple[int, int], optional): output spatial sizes
    scale_factor (float or Tuple[float, float], optional): multiplier for
        spatial size.

.. warning::
    This class is deprecated in favor of :func:`~nn.functional.interpolate`.
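
    An equivalent functional call (illustrative sketch; only the shape is checked)::

        >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
        >>> out = torch.nn.functional.interpolate(input, scale_factor=2.0, mode='nearest')
        >>> out.shape
        torch.Size([1, 1, 4, 4])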

Shape:
    - Input: :math:`(N, C, H_{in}, W_{in})`
    - Output: :math:`(N, C, H_{out}, W_{out})` where

.. math::
      H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor

.. math::
      W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor

Examples::

    >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
    >>> input
    tensor([[[[1., 2.],
              [3., 4.]]]])

    >>> m = nn.UpsamplingNearest2d(scale_factor=2)
    >>> m(input)
    tensor([[[[1., 1., 2., 2.],
              [1., 1., 2., 2.],
              [3., 3., 4., 4.],
              [3., 3., 4., 4.]]]])
    """

    def __init__(
        self,
        size: Optional[_size_2_t] = None,
        scale_factor: Optional[_ratio_2_t] = None,
    ) -> None:
        super().__init__(size, scale_factor, mode="nearest")


class UpsamplingBilinear2d(Upsample):
    r"""Applies a 2D bilinear upsampling to an input signal composed of several input channels.

To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
as its constructor argument.

When :attr:`size` is given, it is the output size of the image `(h, w)`.

Args:
    size (int or Tuple[int, int], optional): output spatial sizes
    scale_factor (float or Tuple[float, float], optional): multiplier for
        spatial size.

.. warning::
    This class is deprecated in favor of :func:`~nn.functional.interpolate`. It is
    equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.
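
    For instance (illustrative sketch; only the shape is checked), the example below
    could equivalently be written as::

        >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
        >>> out = torch.nn.functional.interpolate(
        ...     input, scale_factor=2.0, mode='bilinear', align_corners=True
        ... )
        >>> out.shape
        torch.Size([1, 1, 4, 4])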

Shape:
    - Input: :math:`(N, C, H_{in}, W_{in})`
    - Output: :math:`(N, C, H_{out}, W_{out})` where

.. math::
    H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor

.. math::
    W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor

Examples::

    >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
    >>> input
    tensor([[[[1., 2.],
              [3., 4.]]]])

    >>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?")
    >>> m = nn.UpsamplingBilinear2d(scale_factor=2)
    >>> m(input)
    tensor([[[[1.0000, 1.3333, 1.6667, 2.0000],
              [1.6667, 2.0000, 2.3333, 2.6667],
              [2.3333, 2.6667, 3.0000, 3.3333],
              [3.0000, 3.3333, 3.6667, 4.0000]]]])
    """

    def __init__(
        self,
        size: Optional[_size_2_t] = None,
        scale_factor: Optional[_ratio_2_t] = None,
    ) -> None:
        super().__init__(size, scale_factor, mode="bilinear", align_corners=True)