"""PyTorch EfficientNet model."""

import math
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from .configuration_efficientnet import EfficientNetConfig


logger = logging.get_logger(__name__)


def round_filters(config: EfficientNetConfig, num_channels: int):
    r"""
    Round number of filters based on depth multiplier.
    """
    divisor = config.depth_divisor
    num_channels *= config.width_coefficient
    new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)

    # Make sure that round down does not go down by more than 10%.
    if new_dim < 0.9 * num_channels:
        new_dim += divisor

    return int(new_dim)

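# Rough worked example of `round_filters` (hypothetical config values, not taken from a
# released checkpoint): with `width_coefficient=2.0` and `depth_divisor=8`, a base filter
# count of 32 is scaled to 64.0 and then snapped to the nearest multiple of 8:
#
#     num_channels = 32 * 2.0                       # 64.0
#     new_dim = max(8, int(64.0 + 8 / 2) // 8 * 8)  # 64
#     64 < 0.9 * 64.0                               # False -> no correction, returns 64
#
# The final `if` only bumps the result up by one divisor when snapping down would lose
# more than 10% of the scaled channel count.
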
def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True):
    r"""
    Utility function to get the tuple padding value for the depthwise convolution.

    Args:
        kernel_size (`int` or `tuple`):
            Kernel size of the convolution layers.
        adjust (`bool`, *optional*, defaults to `True`):
            Adjusts padding value to apply to right and bottom sides of the input.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    if adjust:
        return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
    else:
        return (correct[1], correct[1], correct[0], correct[0])

class EfficientNetEmbeddings(nn.Module):
    r"""
    A module that corresponds to the stem module of the original work.
    """

    def __init__(self, config: EfficientNetConfig):
        super().__init__()

        self.out_dim = round_filters(config, 32)
        self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
        self.convolution = nn.Conv2d(
            config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False
        )
        self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        features = self.padding(pixel_values)
        features = self.convolution(features)
        features = self.batchnorm(features)
        features = self.activation(features)

        return features


class EfficientNetDepthwiseConv2d(nn.Conv2d):
    def __init__(
        self,
        in_channels,
        depth_multiplier=1,
        kernel_size=3,
        stride=1,
        padding=0,
        dilation=1,
        bias=True,
        padding_mode="zeros",
    ):
        out_channels = in_channels * depth_multiplier
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=bias,
            padding_mode=padding_mode,
        )


class EfficientNetExpansionLayer(nn.Module):
    r"""
    This corresponds to the expansion phase of each block in the original implementation.
    """

    def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int):
        super().__init__()
        self.expand_conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding="same", bias=False)
        self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
        self.expand_act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        # Expand phase
        hidden_states = self.expand_conv(hidden_states)
        hidden_states = self.expand_bn(hidden_states)
        hidden_states = self.expand_act(hidden_states)

        return hidden_states


class EfficientNetDepthwiseLayer(nn.Module):
    r"""
    This corresponds to the depthwise convolution phase of each block in the original implementation.
    """

    def __init__(self, config: EfficientNetConfig, in_dim: int, stride: int, kernel_size: int, adjust_padding: bool):
        super().__init__()
        self.stride = stride
        conv_pad = "valid" if self.stride == 2 else "same"
        padding = correct_pad(kernel_size, adjust=adjust_padding)

        self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
        self.depthwise_conv = EfficientNetDepthwiseConv2d(
            in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False
        )
        self.depthwise_norm = nn.BatchNorm2d(
            num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
        )
        self.depthwise_act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        # Depthwise convolution
        if self.stride == 2:
            hidden_states = self.depthwise_conv_pad(hidden_states)

        hidden_states = self.depthwise_conv(hidden_states)
        hidden_states = self.depthwise_norm(hidden_states)
        hidden_states = self.depthwise_act(hidden_states)

        return hidden_states


class EfficientNetSqueezeExciteLayer(nn.Module):
    r"""
    This corresponds to the Squeeze and Excitement phase of each block in the original implementation.
    """

    def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool = False):
        super().__init__()
        self.dim = expand_dim if expand else in_dim
        self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))

        self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
        self.reduce = nn.Conv2d(in_channels=self.dim, out_channels=self.dim_se, kernel_size=1, padding="same")
        self.expand = nn.Conv2d(in_channels=self.dim_se, out_channels=self.dim, kernel_size=1, padding="same")
        self.act_reduce = ACT2FN[config.hidden_act]
        self.act_expand = nn.Sigmoid()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        inputs = hidden_states
        hidden_states = self.squeeze(hidden_states)
        hidden_states = self.reduce(hidden_states)
        hidden_states = self.act_reduce(hidden_states)

        hidden_states = self.expand(hidden_states)
        hidden_states = self.act_expand(hidden_states)
        hidden_states = torch.mul(inputs, hidden_states)

        return hidden_states


class EfficientNetFinalBlockLayer(nn.Module):
    r"""
    This corresponds to the final phase of each block in the original implementation.
    """

    def __init__(
        self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool
    ):
        super().__init__()
        self.apply_dropout = stride == 1 and not id_skip
        self.project_conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding="same", bias=False)
        self.project_bn = nn.BatchNorm2d(
            num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
        )
        self.dropout = nn.Dropout(p=drop_rate)

    def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
        hidden_states = self.project_conv(hidden_states)
        hidden_states = self.project_bn(hidden_states)

        if self.apply_dropout:
            hidden_states = self.dropout(hidden_states)
            hidden_states = hidden_states + embeddings

        return hidden_states

class EfficientNetBlock(nn.Module):
    r"""
    This corresponds to the expansion and depthwise convolution phase of each block in the original implementation.

    Args:
        config ([`EfficientNetConfig`]):
            Model configuration class.
        in_dim (`int`):
            Number of input channels.
        out_dim (`int`):
            Number of output channels.
        stride (`int`):
            Stride size to be used in convolution layers.
        expand_ratio (`int`):
            Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
        kernel_size (`int`):
            Kernel size for the depthwise convolution layer.
        drop_rate (`float`):
            Dropout rate to be used in the final phase of each block.
        id_skip (`bool`):
            Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
            of each block. Set to `True` for the first block of each stage.
        adjust_padding (`bool`):
            Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution
            operation, set to `True` for inputs with odd input sizes.
    """

    def __init__(
        self,
        config: EfficientNetConfig,
        in_dim: int,
        out_dim: int,
        stride: int,
        expand_ratio: int,
        kernel_size: int,
        drop_rate: float,
        id_skip: bool,
        adjust_padding: bool,
    ):
        super().__init__()
        self.expand_ratio = expand_ratio
        self.expand = True if self.expand_ratio != 1 else False
        expand_in_dim = in_dim * expand_ratio

        if self.expand:
            self.expansion = EfficientNetExpansionLayer(
                config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride
            )

        self.depthwise_conv = EfficientNetDepthwiseLayer(
            config=config,
            in_dim=expand_in_dim if self.expand else in_dim,
            stride=stride,
            kernel_size=kernel_size,
            adjust_padding=adjust_padding,
        )
        self.squeeze_excite = EfficientNetSqueezeExciteLayer(
            config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand
        )
        self.projection = EfficientNetFinalBlockLayer(
            config=config,
            in_dim=expand_in_dim if self.expand else in_dim,
            out_dim=out_dim,
            stride=stride,
            drop_rate=drop_rate,
            id_skip=id_skip,
        )

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        embeddings = hidden_states
        # Expansion and depthwise convolution phase
        if self.expand_ratio != 1:
            hidden_states = self.expansion(hidden_states)
        hidden_states = self.depthwise_conv(hidden_states)

        # Squeeze and excite phase
        hidden_states = self.squeeze_excite(hidden_states)
        hidden_states = self.projection(embeddings, hidden_states)
        return hidden_states

class EfficientNetEncoder(nn.Module):
    r"""
    Forward propagates the embeddings through each EfficientNet block.

    Args:
        config ([`EfficientNetConfig`]):
            Model configuration class.
    """

    def __init__(self, config: EfficientNetConfig):
        super().__init__()
        self.config = config
        self.depth_coefficient = config.depth_coefficient

        def round_repeats(repeats):
            # Round number of block repeats based on depth multiplier.
            return int(math.ceil(self.depth_coefficient * repeats))

        num_base_blocks = len(config.in_channels)
        num_blocks = sum(round_repeats(n) for n in config.num_block_repeats)

        curr_block_num = 0
        blocks = []
        for i in range(num_base_blocks):
            in_dim = round_filters(config, config.in_channels[i])
            out_dim = round_filters(config, config.out_channels[i])
            stride = config.strides[i]
            kernel_size = config.kernel_sizes[i]
            expand_ratio = config.expand_ratios[i]

            for j in range(round_repeats(config.num_block_repeats[i])):
                id_skip = True if j == 0 else False
                stride = 1 if j > 0 else stride
                in_dim = out_dim if j > 0 else in_dim
                adjust_padding = False if curr_block_num in config.depthwise_padding else True
                drop_rate = config.drop_connect_rate * curr_block_num / num_blocks

                block = EfficientNetBlock(
                    config=config,
                    in_dim=in_dim,
                    out_dim=out_dim,
                    stride=stride,
                    kernel_size=kernel_size,
                    expand_ratio=expand_ratio,
                    drop_rate=drop_rate,
                    id_skip=id_skip,
                    adjust_padding=adjust_padding,
                )
                blocks.append(block)
                curr_block_num += 1

        self.blocks = nn.ModuleList(blocks)
        self.top_conv = nn.Conv2d(
            in_channels=out_dim, out_channels=round_filters(config, 1280), kernel_size=1, padding="same", bias=False
        )
        self.top_bn = nn.BatchNorm2d(
            num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
        )
        self.top_activation = ACT2FN[config.hidden_act]

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> BaseModelOutputWithNoAttention:
        all_hidden_states = (hidden_states,) if output_hidden_states else None

        for block in self.blocks:
            hidden_states = block(hidden_states)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

        hidden_states = self.top_conv(hidden_states)
        hidden_states = self.top_bn(hidden_states)
        hidden_states = self.top_activation(hidden_states)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
        )


@auto_docstring
class EfficientNetPreTrainedModel(PreTrainedModel):
    config_class = EfficientNetConfig
    base_model_prefix = "efficientnet"
    main_input_name = "pixel_values"
    _no_split_modules = []

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@auto_docstring
class EfficientNetModel(EfficientNetPreTrainedModel):
    def __init__(self, config: EfficientNetConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = EfficientNetEmbeddings(config)
        self.encoder = EfficientNetEncoder(config)

        # Final pooling layer
        if config.pooling_type == "mean":
            self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
        elif config.pooling_type == "max":
            self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
        else:
            raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}")

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        # Apply pooling
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Reshape (batch_size, hidden_dim, 1, 1) -> (batch_size, hidden_dim)
        pooled_output = pooled_output.reshape(pooled_output.shape[:2])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )

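# Rough usage sketch for the bare backbone (the "google/efficientnet-b0" checkpoint name and
# the AutoImageProcessor pairing are assumptions, not guaranteed by this file):
#
#     from transformers import AutoImageProcessor, EfficientNetModel
#
#     processor = AutoImageProcessor.from_pretrained("google/efficientnet-b0")
#     model = EfficientNetModel.from_pretrained("google/efficientnet-b0")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     outputs.last_hidden_state.shape   # (batch_size, hidden_dim, height, width)
#     outputs.pooler_output.shape       # (batch_size, hidden_dim)
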
@auto_docstring(
    custom_intro="""
    EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """
)
class EfficientNetForImageClassification(EfficientNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.efficientnet = EfficientNetModel(config)
        # Classifier head
        self.dropout = nn.Dropout(p=config.dropout_rate)
        self.classifier = nn.Linear(config.hidden_dim, self.num_labels) if self.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.efficientnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


__all__ = ["EfficientNetForImageClassification", "EfficientNetModel", "EfficientNetPreTrainedModel"]