import warnings

import torch.nn.functional as F
from torch import Tensor

from .batchnorm import _LazyNormBase, _NormBase


__all__ = [
    "InstanceNorm1d",
    "InstanceNorm2d",
    "InstanceNorm3d",
    "LazyInstanceNorm1d",
    "LazyInstanceNorm2d",
    "LazyInstanceNorm3d",
]


class _InstanceNorm(_NormBase):
    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: float = 0.1,
        affine: bool = False,
        track_running_stats: bool = False,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__(
            num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
        )

    def _check_input_dim(self, input):
        raise NotImplementedError

    def _get_no_batch_dim(self):
        raise NotImplementedError

    def _handle_no_batch_input(self, input):
        return self._apply_instance_norm(input.unsqueeze(0)).squeeze(0)

    def _apply_instance_norm(self, input):
        return F.instance_norm(
            input,
            self.running_mean,
            self.running_var,
            self.weight,
            self.bias,
            self.training or not self.track_running_stats,
            self.momentum if self.momentum is not None else 0.0,
            self.eps,
        )

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        version = local_metadata.get("version", None)

        if version is None and not self.track_running_stats:
            running_stats_keys = []
            for name in ("running_mean", "running_var"):
                key = prefix + name
                if key in state_dict:
                    running_stats_keys.append(key)
            if len(running_stats_keys) > 0:
                error_msgs.append(
                    "Unexpected running stats buffer(s) {names} for {klass} "
                    "with track_running_stats=False. If state_dict is a "
                    "checkpoint saved before 0.4.0, this may be expected "
                    "because {klass} does not track running stats by default "
                    "since 0.4.0. Please remove these keys from state_dict. If "
                    "the running stats are actually needed, instead set "
                    "track_running_stats=True in {klass} to enable them. See "
                    "the documentation of {klass} for details.".format(
                        names=" and ".join(f'"{k}"' for k in running_stats_keys),
                        klass=self.__class__.__name__,
                    )
                )
                for key in running_stats_keys:
                    state_dict.pop(key)

        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)

        feature_dim = input.dim() - self._get_no_batch_dim()
        if input.size(feature_dim) != self.num_features:
            if self.affine:
                raise ValueError(
                    f"expected input's size at dim={feature_dim} to match "
                    f"num_features ({self.num_features}), but got: "
                    f"{input.size(feature_dim)}."
                )
            else:
                warnings.warn(
                    f"input's size at dim={feature_dim} does not match num_features. "
                    "You can silence this warning by not passing in num_features, "
                    "which is not used because affine=False"
                )

        if input.dim() == self._get_no_batch_dim():
            return self._handle_no_batch_input(input)

        return self._apply_instance_norm(input)


class InstanceNorm1d(_InstanceNorm):
    r"""Applies Instance Normalization.

This operation applies Instance Normalization
over a 2D (unbatched) or 3D (batched) input as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`__.

.. math::

    y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

The mean and standard deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the number of features or channels of the input) if :attr:`affine` is ``True``.
The variance is calculated via the biased estimator, equivalent to
`torch.var(input, unbiased=False)`.
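
For example, the normalization can be reproduced by hand from the per-channel
spatial statistics (an illustrative sketch, with the default ``affine=False``
and ``eps``)::

    >>> x = torch.randn(20, 100, 40)
    >>> m = nn.InstanceNorm1d(100)
    >>> mean = x.mean(dim=2, keepdim=True)
    >>> var = x.var(dim=2, unbiased=False, keepdim=True)
    >>> torch.allclose(m(x), (x - mean) / (var + m.eps).sqrt(), atol=1e-5)
    True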

By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.

If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.

.. note::
    This :attr:`momentum` argument is different from one used in optimizer
    classes and the conventional notion of momentum. Mathematically, the
    update rule for running statistics here is
    :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
    where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
    new observed value.
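
    For example, with the default ``momentum=0.1`` and freshly initialized
    running statistics, one training step moves the running mean one tenth of
    the way toward the batch statistic (an illustrative sketch, assuming
    ``track_running_stats=True``)::

        >>> m = nn.InstanceNorm1d(100, track_running_stats=True)
        >>> x = torch.randn(20, 100, 40)
        >>> _ = m(x)
        >>> torch.allclose(m.running_mean, 0.1 * x.mean(dim=(0, 2)), atol=1e-5)
        True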

.. note::
    :class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but
    have some subtle differences. :class:`InstanceNorm1d` is applied
    on each channel of channeled data like multidimensional time series, while
    :class:`LayerNorm` is usually applied over the entire sample and often in NLP
    tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
    transform, while :class:`InstanceNorm1d` usually does not apply an affine
    transform.

Args:
    num_features: number of features or channels :math:`C` of the input
    eps: a value added to the denominator for numerical stability. Default: 1e-5
    momentum: the value used for the running_mean and running_var computation. Default: 0.1
    affine: a boolean value that when set to ``True``, this module has
        learnable affine parameters, initialized the same way as done for batch normalization.
        Default: ``False``.
    track_running_stats: a boolean value that when set to ``True``, this
        module tracks the running mean and variance, and when set to ``False``,
        this module does not track such statistics and always uses batch
        statistics in both training and eval modes. Default: ``False``

Shape:
    - Input: :math:`(N, C, L)` or :math:`(C, L)`
    - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)

Examples::

    >>> # Without Learnable Parameters
    >>> m = nn.InstanceNorm1d(100)
    >>> # With Learnable Parameters
    >>> m = nn.InstanceNorm1d(100, affine=True)
    >>> input = torch.randn(20, 100, 40)
    >>> output = m(input)
"""

    def _get_no_batch_dim(self):
        return 2

    def _check_input_dim(self, input):
        if input.dim() not in (2, 3):
            raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")


class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of the ``num_features`` argument.

The ``num_features`` argument of the :class:`InstanceNorm1d` is inferred from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`, `running_mean` and `running_var`.

Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
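
For example, ``num_features`` can be omitted at construction time and is
inferred from the first forward pass (an illustrative sketch)::

    >>> m = nn.LazyInstanceNorm1d(affine=True)
    >>> input = torch.randn(20, 100, 40)
    >>> output = m(input)
    >>> m.num_features
    100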

Args:
    num_features: :math:`C` from an expected input of size
        :math:`(N, C, L)` or :math:`(C, L)`
    eps: a value added to the denominator for numerical stability. Default: 1e-5
    momentum: the value used for the running_mean and running_var computation. Default: 0.1
    affine: a boolean value that when set to ``True``, this module has
        learnable affine parameters, initialized the same way as done for batch normalization.
        Default: ``False``.
    track_running_stats: a boolean value that when set to ``True``, this
        module tracks the running mean and variance, and when set to ``False``,
        this module does not track such statistics and always uses batch
        statistics in both training and eval modes. Default: ``False``

Shape:
    - Input: :math:`(N, C, L)` or :math:`(C, L)`
    - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)
"""

    cls_to_become = InstanceNorm1d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        return 2

    def _check_input_dim(self, input):
        if input.dim() not in (2, 3):
            raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")


class InstanceNorm2d(_InstanceNorm):
    r"""Applies Instance Normalization.

This operation applies Instance Normalization
over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`__.

.. math::

    y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

The mean and standard deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
The variance is calculated via the biased estimator, equivalent to
`torch.var(input, unbiased=False)`.

By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
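
For example, with the default settings no running statistics are kept, so
``train()`` and ``eval()`` modes compute the same result (an illustrative
sketch)::

    >>> m = nn.InstanceNorm2d(100)
    >>> x = torch.randn(20, 100, 35, 45)
    >>> torch.allclose(m(x), m.eval()(x))
    True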

If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.

.. note::
    This :attr:`momentum` argument is different from one used in optimizer
    classes and the conventional notion of momentum. Mathematically, the
    update rule for running statistics here is
    :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
    where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
    new observed value.

.. note::
    :class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but
    have some subtle differences. :class:`InstanceNorm2d` is applied
    on each channel of channeled data like RGB images, while
    :class:`LayerNorm` is usually applied over the entire sample and often in NLP
    tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
    transform, while :class:`InstanceNorm2d` usually does not apply an affine
    transform.

Args:
    num_features: :math:`C` from an expected input of size
        :math:`(N, C, H, W)` or :math:`(C, H, W)`
    eps: a value added to the denominator for numerical stability. Default: 1e-5
    momentum: the value used for the running_mean and running_var computation. Default: 0.1
    affine: a boolean value that when set to ``True``, this module has
        learnable affine parameters, initialized the same way as done for batch normalization.
        Default: ``False``.
    track_running_stats: a boolean value that when set to ``True``, this
        module tracks the running mean and variance, and when set to ``False``,
        this module does not track such statistics and always uses batch
        statistics in both training and eval modes. Default: ``False``

Shape:
    - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
    - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)

Examples::

    >>> # Without Learnable Parameters
    >>> m = nn.InstanceNorm2d(100)
    >>> # With Learnable Parameters
    >>> m = nn.InstanceNorm2d(100, affine=True)
    >>> input = torch.randn(20, 100, 35, 45)
    >>> output = m(input)
"""

    def _get_no_batch_dim(self):
        return 3

    def _check_input_dim(self, input):
        if input.dim() not in (3, 4):
            raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)")


class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of the ``num_features`` argument.

The ``num_features`` argument of the :class:`InstanceNorm2d` is inferred from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.

Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
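
For example, the affine parameters are materialized with one entry per
channel of the first input (an illustrative sketch)::

    >>> m = nn.LazyInstanceNorm2d(affine=True)
    >>> output = m(torch.randn(20, 100, 35, 45))
    >>> m.weight.shape
    torch.Size([100])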

Args:
    num_features: :math:`C` from an expected input of size
        :math:`(N, C, H, W)` or :math:`(C, H, W)`
    eps: a value added to the denominator for numerical stability. Default: 1e-5
    momentum: the value used for the running_mean and running_var computation. Default: 0.1
    affine: a boolean value that when set to ``True``, this module has
        learnable affine parameters, initialized the same way as done for batch normalization.
        Default: ``False``.
    track_running_stats: a boolean value that when set to ``True``, this
        module tracks the running mean and variance, and when set to ``False``,
        this module does not track such statistics and always uses batch
        statistics in both training and eval modes. Default: ``False``

Shape:
    - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
    - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
"""

    cls_to_become = InstanceNorm2d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        return 3

    def _check_input_dim(self, input):
        if input.dim() not in (3, 4):
            raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)")


class InstanceNorm3d(_InstanceNorm):
    r"""Applies Instance Normalization.

This operation applies Instance Normalization
over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`__.

.. math::

    y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

The mean and standard deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
The variance is calculated via the biased estimator, equivalent to
`torch.var(input, unbiased=False)`.

By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.

If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.

.. note::
    This :attr:`momentum` argument is different from one used in optimizer
    classes and the conventional notion of momentum. Mathematically, the
    update rule for running statistics here is
    :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
    where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
    new observed value.

.. note::
    :class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but
    have some subtle differences. :class:`InstanceNorm3d` is applied
    on each channel of channeled data like 3D models with RGB color, while
    :class:`LayerNorm` is usually applied over the entire sample and often in NLP
    tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
    transform, while :class:`InstanceNorm3d` usually does not apply an affine
    transform.

Args:
    num_features: :math:`C` from an expected input of size
        :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
    eps: a value added to the denominator for numerical stability. Default: 1e-5
    momentum: the value used for the running_mean and running_var computation. Default: 0.1
    affine: a boolean value that when set to ``True``, this module has
        learnable affine parameters, initialized the same way as done for batch normalization.
        Default: ``False``.
    track_running_stats: a boolean value that when set to ``True``, this
        module tracks the running mean and variance, and when set to ``False``,
        this module does not track such statistics and always uses batch
        statistics in both training and eval modes. Default: ``False``

Shape:
    - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
    - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
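
An unbatched input is handled as if a batch dimension of one had been added
(an illustrative sketch)::

    >>> m = nn.InstanceNorm3d(100)
    >>> x = torch.randn(100, 35, 45, 10)
    >>> torch.allclose(m(x), m(x.unsqueeze(0)).squeeze(0))
    True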

Examples::

    >>> # Without Learnable Parameters
    >>> m = nn.InstanceNorm3d(100)
    >>> # With Learnable Parameters
    >>> m = nn.InstanceNorm3d(100, affine=True)
    >>> input = torch.randn(20, 100, 35, 45, 10)
    >>> output = m(input)
"""

    def _get_no_batch_dim(self):
        return 4

    def _check_input_dim(self, input):
        if input.dim() not in (4, 5):
            raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)")


class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of the ``num_features`` argument.

The ``num_features`` argument of the :class:`InstanceNorm3d` is inferred from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.

Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
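
For example, the running statistics are materialized with one entry per
channel of the first input (an illustrative sketch)::

    >>> m = nn.LazyInstanceNorm3d(track_running_stats=True)
    >>> output = m(torch.randn(20, 100, 35, 45, 10))
    >>> m.running_mean.shape
    torch.Size([100])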

Args:
    num_features: :math:`C` from an expected input of size
        :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
    eps: a value added to the denominator for numerical stability. Default: 1e-5
    momentum: the value used for the running_mean and running_var computation. Default: 0.1
    affine: a boolean value that when set to ``True``, this module has
        learnable affine parameters, initialized the same way as done for batch normalization.
        Default: ``False``.
    track_running_stats: a boolean value that when set to ``True``, this
        module tracks the running mean and variance, and when set to ``False``,
        this module does not track such statistics and always uses batch
        statistics in both training and eval modes. Default: ``False``

Shape:
    - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
    - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
"""

    cls_to_become = InstanceNorm3d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        return 4

    def _check_input_dim(self, input):
        if input.dim() not in (4, 5):
            raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)")