
    h5                        S SK r S SKJr  S SKJrJrJr  S SKrS SKJr  SSK	J
r
Jr  \R                  R                  R                  r " S S\R                  R                  5      r " S	 S
\R                  R"                  5      r " S S\5      r " S S\5      r " S S\R                  R                  5      r " S S\R                  R"                  5      r " S S\R                  R                  5      rg)    N)Sequence)CallableOptionalUnion)Tensor   )_log_api_usage_once_make_ntuplec                      ^  \ rS rSrSr SS\S\4U 4S jjjrS\S\	S\S	\
S
\\	   S\\	   S\\	   4U 4S jjrS\S\4S jrS\	4S jrSrU =r$ )FrozenBatchNorm2d   a  
BatchNorm2d where the batch statistics and the affine parameters are fixed

Args:
    num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)``
    eps (float): a value added to the denominator for numerical stability. Default: 1e-5
num_featuresepsc                 t  > [         TU ]  5         [        U 5        X l        U R	                  S[
        R                  " U5      5        U R	                  S[
        R                  " U5      5        U R	                  S[
        R                  " U5      5        U R	                  S[
        R                  " U5      5        g )Nweightbiasrunning_meanrunning_var)super__init__r	   r   register_buffertorchoneszeros)selfr   r   	__class__s      L/var/www/auris/envauris/lib/python3.13/site-packages/torchvision/ops/misc.pyr   FrozenBatchNorm2d.__init__   s    
 	D!Xuzz,'?@VU[[%>?^U[[-FG]EJJ|,DE    
state_dictprefixlocal_metadatastrictmissing_keysunexpected_keys
error_msgsc           	      B   > US-   nX;   a  X	 [         T	U ]  XX4XVU5        g )Nnum_batches_tracked)r   _load_from_state_dict)
r   r    r!   r"   r#   r$   r%   r&   num_batches_tracked_keyr   s
            r   r)   'FrozenBatchNorm2d._load_from_state_dict$   s4     #)+@"@"03%Wa	
r   xreturnc                 L   U R                   R                  SSSS5      nU R                  R                  SSSS5      nU R                  R                  SSSS5      nU R                  R                  SSSS5      nX$U R
                  -   R                  5       -  nX5U-  -
  nX-  U-   $ )N   )r   reshaper   r   r   r   rsqrt)r   r,   wbrvrmscaler   s           r   forwardFrozenBatchNorm2d.forward6   s     KK2q!,IIaQ*%%aQ2&&q"a3$((]))++:~y4r   c                     U R                   R                   SU R                  R                  S    SU R                   S3$ )N(r   z, eps=))r   __name__r   shaper   )r   s    r   __repr__FrozenBatchNorm2d.__repr__A   s;    ..))*!DKK,=,=a,@+AzQRSSr   )r   )gh㈵>)r=   
__module____qualname____firstlineno____doc__intfloatr   dictstrboollistr)   r   r8   r?   __static_attributes____classcell__r   s   @r   r   r      s     FF F F

 
 	

 
 3i
 c
 I
$	  	 F 	 T# T Tr   r   c                     ^  \ rS rSrSSSS\R
                  R                  \R
                  R                  SSS\R
                  R                  4
S\	S\	S\
\	\\	S	4   4   S
\
\	\\	S	4   4   S\\
\	\\	S	4   \4      S\	S\\S	\R
                  R                  4      S\\S	\R
                  R                  4      S\
\	\\	S	4   4   S\\   S\\   S\S	\R
                  R                  4   SS4U 4S jjjrSrU =r$ )ConvNormActivationE      r/   NTin_channelsout_channelskernel_size.stridepaddinggroups
norm_layeractivation_layerdilationinplacer   
conv_layerr-   c                 v  >^^	 Uc  [        T[        5      (       a!  [        T	[        5      (       a  TS-
  S-  T	-  nOb[        T[        5      (       a  [        T5      O
[        T	5      n[	        TU5      m[	        T	U5      m	[        U	U4S j[        U5       5       5      nUc  US L nU" UUTUUT	UUS9/nUb  UR                  U" U5      5        Ub   U
c  0 OSU
0nUR                  U" S0 UD65        [        TU ]$  " U6   [        U 5        X l        U R                  [        :X  a  [        R                  " S5        g g )Nr/   r   c              3   F   >#    U  H  nTU   S -
  S-  TU   -  v   M     g7f)r/   r   N ).0irZ   rT   s     r   	<genexpr>.ConvNormActivation.__init__.<locals>.<genexpr>]   s*     bQaAQ!!3 9HQK GQas   !)rZ   rW   r   r[   zhDon't use ConvNormActivation directly, please use Conv2dNormActivation and Conv3dNormActivation instead.r_   )
isinstancerE   r   lenr
   tuplerangeappendr   r   r	   rS   r   rO   warningswarn)r   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   r   r\   	_conv_dimlayersparamsr   s      `     `      r   r   ConvNormActivation.__init__F   s<     ?+s++
8S0I0I&?q08;0:;0Q0QC,WZ[cWd	*;	B')<bQVW`Qabb<%D !	
 !MM*\23'"?RG0DFMM*4V45&!D!(>>//MMz 0r   )rS   )r=   rA   rB   rC   r   nnBatchNorm2dReLUConv2drE   r   rf   r   rH   r   ModulerI   r   rK   rL   rM   s   @r   rO   rO   E   s^   
 45./>B?Dxx?S?SEJXX]]01"&#5:XX__55 5 3c3h/0	5
 c5c?*+5 %U38_c 9:;5 5 Xc588??&:;<5 #8C,@#AB5 U38_,-5 $5 tn5 S%((//125 
5 5r   rO   c                     ^  \ rS rSrSrSSSS\R                  R                  \R                  R                  SSS4	S\	S\	S	\
\	\\	\	4   4   S
\
\	\\	\	4   4   S\\
\	\\	\	4   \4      S\	S\\S\R                  R                  4      S\\S\R                  R                  4      S\
\	\\	\	4   4   S\\   S\\   SS4U 4S jjjrSrU =r$ )Conv2dNormActivation~   a  
Configurable block used for Convolution2d-Normalization-Activation blocks.

Args:
    in_channels (int): Number of channels in the input image
    out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
    kernel_size: (int, optional): Size of the convolving kernel. Default: 3
    stride (int, optional): Stride of the convolution. Default: 1
    padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
    groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
    norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm2d``
    activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
    dilation (int): Spacing between kernel elements. Default: 1
    inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
    bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.

rQ   r/   NTrR   rS   rT   rU   rV   rW   rX   .rY   rZ   r[   r   r-   c                 j   > [         TU ]  UUUUUUUUU	U
U[        R                  R                  5        g N)r   r   r   ro   rr   r   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   r   r   s               r   r   Conv2dNormActivation.__init__   >     	HHOO	
r   r_   )r=   rA   rB   rC   rD   r   ro   rp   rq   rE   r   rf   r   rH   r   rs   rI   r   rK   rL   rM   s   @r   ru   ru   ~   s<   , 45./>B?Dxx?S?SEJXX]]01"&#

 
 3c3h/0	

 c5c?*+
 %U38_c 9:;
 
 Xc588??&:;<
 #8C,@#AB
 U38_,-
 $
 tn
 

 
r   ru   c                     ^  \ rS rSrSrSSSS\R                  R                  \R                  R                  SSS4	S\	S\	S	\
\	\\	\	\	4   4   S
\
\	\\	\	\	4   4   S\\
\	\\	\	\	4   \4      S\	S\\S\R                  R                  4      S\\S\R                  R                  4      S\
\	\\	\	\	4   4   S\\   S\\   SS4U 4S jjjrSrU =r$ )Conv3dNormActivation   a  
Configurable block used for Convolution3d-Normalization-Activation blocks.

Args:
    in_channels (int): Number of channels in the input video.
    out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
    kernel_size: (int, optional): Size of the convolving kernel. Default: 3
    stride (int, optional): Stride of the convolution. Default: 1
    padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
    groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
    norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm3d``
    activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
    dilation (int): Spacing between kernel elements. Default: 1
    inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
    bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
rQ   r/   NTrR   rS   rT   rU   rV   rW   rX   .rY   rZ   r[   r   r-   c                 j   > [         TU ]  UUUUUUUUU	U
U[        R                  R                  5        g rx   )r   r   r   ro   Conv3dry   s               r   r   Conv3dNormActivation.__init__   r{   r   r_   )r=   rA   rB   rC   rD   r   ro   BatchNorm3drq   rE   r   rf   r   rH   r   rs   rI   r   rK   rL   rM   s   @r   r}   r}      sH   * 9:34CG?Dxx?S?SEJXX]]56"&#

 
 3c3m 445	

 c5c3//0
 %U3S=%93 >?@
 
 Xc588??&:;<
 #8C,@#AB
 U3S=112
 $
 tn
 

 
r   r}   c                   $  ^  \ rS rSrSr\R                  R                  \R                  R                  4S\	S\	S\
S\R                  R                  4   S\
S\R                  R                  4   SS	4
U 4S
 jjjrS\S\4S jrS\S\4S jrSrU =r$ )SqueezeExcitation   a%  
This block implements the Squeeze-and-Excitation block from https://arxiv.org/abs/1709.01507 (see Fig. 1).
Parameters ``activation``, and ``scale_activation`` correspond to ``delta`` and ``sigma`` in eq. 3.

Args:
    input_channels (int): Number of channels in the input image
    squeeze_channels (int): Number of squeeze channels
    activation (Callable[..., torch.nn.Module], optional): ``delta`` activation. Default: ``torch.nn.ReLU``
    scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid``
input_channelssqueeze_channels
activation.scale_activationr-   Nc                 D  > [         TU ]  5         [        U 5        [        R                  R                  S5      U l        [        R                  R                  XS5      U l        [        R                  R                  X!S5      U l	        U" 5       U l
        U" 5       U l        g )Nr/   )r   r   r	   r   ro   AdaptiveAvgPool2davgpoolrr   fc1fc2r   r   )r   r   r   r   r   r   s        r   r   SqueezeExcitation.__init__   so     	D!xx11!488??>QG88??#3QG$, 0 2r   inputc                     U R                  U5      nU R                  U5      nU R                  U5      nU R                  U5      nU R	                  U5      $ rx   )r   r   r   r   r   r   r   r7   s      r   _scaleSqueezeExcitation._scale   sI    U#&$$U++r   c                 ,    U R                  U5      nX!-  $ rx   )r   r   s      r   r8   SqueezeExcitation.forward  s    E"}r   )r   r   r   r   r   )r=   rA   rB   rC   rD   r   ro   rq   SigmoidrE   r   rs   r   r   r   r8   rK   rL   rM   s   @r   r   r      s    	 6;XX]];@88;K;K33 3 S%((//12	3
 #3#783 
3 3,F ,v ,V   r   r   c                      ^  \ rS rSrSrS\R                  R                  SSS4S\S\	\   S\
\S	\R                  R                  4      S
\
\S	\R                  R                  4      S\
\   S\S\4U 4S jjjrSrU =r$ )MLPi  a  This block implements the multi-layer perceptron (MLP) module.

Args:
    in_channels (int): Number of channels of the input
    hidden_channels (List[int]): List of the hidden channel dimensions
    norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the linear layer. If ``None`` this layer won't be used. Default: ``None``
    activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the linear layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
    inplace (bool, optional): Parameter for the activation layer, which can optionally do the operation in-place.
        Default is ``None``, which uses the respective default values of the ``activation_layer`` and Dropout layer.
    bias (bool): Whether to use bias in the linear layer. Default ``True``
    dropout (float): The probability for the dropout layer. Default: 0.0
NTg        rR   hidden_channelsrX   .rY   r[   r   dropoutc           	      H  > Uc  0 OSU0n/ n	Un
US S  H  nU	R                  [        R                  R                  XUS95        Ub  U	R                  U" U5      5        U	R                  U" S0 UD65        U	R                  [        R                  R                  " U40 UD65        Un
M     U	R                  [        R                  R                  XS   US95        U	R                  [        R                  R                  " U40 UD65        [
        TU ]  " U	6   [        U 5        g )Nr[   r0   )r   r_   )rh   r   ro   LinearDropoutr   r   r	   )r   rR   r   rX   rY   r[   r   r   rm   rl   in_dim
hidden_dimr   s               r   r   MLP.__init__  s     Y,@)#2.JMM%((//&4/HI%j45MM*4V45MM%((**7=f=>F / 	ehhoofb.AoMNehh&&w9&9:&!D!r   r_   )r=   rA   rB   rC   rD   r   ro   rq   rE   rJ   r   r   rs   rI   rF   r   rK   rL   rM   s   @r   r   r     s    " @DEJXX]]"&"" c" Xc588??&:;<	"
 #8C,@#AB" $" " " "r   r   c                   L   ^  \ rS rSrSrS\\   4U 4S jjrS\S\4S jr	Sr
U =r$ )	Permutei5  zThis module returns a view of the tensor input with its dimensions permuted.

Args:
    dims (List[int]): The desired ordering of dimensions
dimsc                 .   > [         TU ]  5         Xl        g rx   )r   r   r   )r   r   r   s     r   r   Permute.__init__<  s    	r   r,   r-   c                 B    [         R                  " XR                  5      $ rx   )r   permuter   )r   r,   s     r   r8   Permute.forward@  s    }}Q		**r   )r   )r=   rA   rB   rC   rD   rJ   rE   r   r   r8   rK   rL   rM   s   @r   r   r   5  s0    T#Y + +F + +r   r   )ri   collections.abcr   typingr   r   r   r   r   utilsr	   r
   ro   
functionalinterpolaters   r   
SequentialrO   ru   r}   r   r   r   r_   r   r   <module>r      s     $ , ,   5 hh!!--4T 4Tn6,, 6r/
- /
d.
- .
b$ $N*"%((

 *"Z+ehhoo +r   