"""PyTorch MobileViTV2 model."""

from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
    SemanticSegmenterOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from .configuration_mobilevitv2 import MobileViTV2Config


logger = logging.get_logger(__name__)


def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
    """
    Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the
    original TensorFlow repo. It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not reduce the value by more than 10%.
    if new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)


def clip(value: float, min_val: float = float("-inf"), max_val: float = float("inf")) -> float:
    return max(min_val, min(max_val, value))


class MobileViTV2ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileViTV2Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        dilation: int = 1,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        padding = int((kernel_size - 1) / 2) * dilation

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=1e-5,
                momentum=0.1,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileViTV2InvertedResidual(nn.Module):
    """
    Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381
    """

    def __init__(
        self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1
    ) -> None:
        super().__init__()
        expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)

        if stride not in [1, 2]:
            raise ValueError(f"Invalid stride {stride}.")

        self.use_residual = (stride == 1) and (in_channels == out_channels)

        self.expand_1x1 = MobileViTV2ConvLayer(
            config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1
        )

        self.conv_3x3 = MobileViTV2ConvLayer(
            config,
            in_channels=expanded_channels,
            out_channels=expanded_channels,
            kernel_size=3,
            stride=stride,
            groups=expanded_channels,
            dilation=dilation,
        )

        self.reduce_1x1 = MobileViTV2ConvLayer(
            config,
            in_channels=expanded_channels,
            out_channels=out_channels,
            kernel_size=1,
            use_activation=False,
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        residual = features

        features = self.expand_1x1(features)
        features = self.conv_3x3(features)
        features = self.reduce_1x1(features)

        return residual + features if self.use_residual else features


class MobileViTV2MobileNetLayer(nn.Module):
    def __init__(
        self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1
    ) -> None:
        super().__init__()

        self.layer = nn.ModuleList()
        for i in range(num_stages):
            layer = MobileViTV2InvertedResidual(
                config,
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride if i == 0 else 1,
            )
            self.layer.append(layer)
            in_channels = out_channels

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        for layer_module in self.layer:
            features = layer_module(features)
        return features


class MobileViTV2LinearSelfAttention(nn.Module):
    """
    This layer applies a self-attention with linear complexity, as described in MobileViTV2 paper:
    https://arxiv.org/abs/2206.02680

    Args:
        config (`MobileVitv2Config`):
             Model configuration object
        embed_dim (`int`):
            `input_channels` from an expected input of size :math:`(batch_size, input_channels, height, width)`
    """

    def __init__(self, config: MobileViTV2Config, embed_dim: int) -> None:
        super().__init__()

        self.qkv_proj = MobileViTV2ConvLayer(
            config=config,
            in_channels=embed_dim,
            out_channels=1 + (2 * embed_dim),
            bias=True,
            kernel_size=1,
            use_normalization=False,
            use_activation=False,
        )

        self.attn_dropout = nn.Dropout(p=config.attn_dropout)
        self.out_proj = MobileViTV2ConvLayer(
            config=config,
            in_channels=embed_dim,
            out_channels=embed_dim,
            bias=True,
            kernel_size=1,
            use_normalization=False,
            use_activation=False,
        )
        self.embed_dim = embed_dim

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # (batch_size, embed_dim, num_pixels, num_patches) -> (batch_size, 1 + 2 * embed_dim, num_pixels, num_patches)
        qkv = self.qkv_proj(hidden_states)

        # Project hidden_states into query, key and value:
        # query -> (batch_size, 1, num_pixels, num_patches)
        # key, value -> (batch_size, embed_dim, num_pixels, num_patches)
        query, key, value = torch.split(qkv, split_size_or_sections=[1, self.embed_dim, self.embed_dim], dim=1)

        # apply softmax along the num_patches dimension
        context_scores = torch.nn.functional.softmax(query, dim=-1)
        context_scores = self.attn_dropout(context_scores)

        # compute the context vector: (batch_size, embed_dim, num_pixels, 1)
        context_vector = key * context_scores
        context_vector = torch.sum(context_vector, dim=-1, keepdim=True)

        # combine the context vector with the values
        out = torch.nn.functional.relu(value) * context_vector.expand_as(value)
        out = self.out_proj(out)
        return out


class MobileViTV2FFN(nn.Module):
    def __init__(
        self,
        config: MobileViTV2Config,
        embed_dim: int,
        ffn_latent_dim: int,
        ffn_dropout: float = 0.0,
    ) -> None:
        super().__init__()
        self.conv1 = MobileViTV2ConvLayer(
            config=config,
            in_channels=embed_dim,
            out_channels=ffn_latent_dim,
            kernel_size=1,
            stride=1,
            bias=True,
            use_normalization=False,
            use_activation=True,
        )
        self.dropout1 = nn.Dropout(ffn_dropout)

        self.conv2 = MobileViTV2ConvLayer(
            config=config,
            in_channels=ffn_latent_dim,
            out_channels=embed_dim,
            kernel_size=1,
            stride=1,
            bias=True,
            use_normalization=False,
            use_activation=False,
        )
        self.dropout2 = nn.Dropout(ffn_dropout)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.dropout1(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.dropout2(hidden_states)
        return hidden_states


class MobileViTV2TransformerLayer(nn.Module):
    def __init__(
        self,
        config: MobileViTV2Config,
        embed_dim: int,
        ffn_latent_dim: int,
        dropout: float = 0.0,
    ) -> None:
        super().__init__()
        self.layernorm_before = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps)
        self.attention = MobileViTV2LinearSelfAttention(config, embed_dim)
        self.dropout1 = nn.Dropout(p=dropout)
        self.layernorm_after = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps)
        self.ffn = MobileViTV2FFN(config, embed_dim, ffn_latent_dim, config.ffn_dropout)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        layernorm_1_out = self.layernorm_before(hidden_states)
        attention_output = self.attention(layernorm_1_out)
        hidden_states = attention_output + hidden_states

        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.ffn(layer_output)

        layer_output = layer_output + hidden_states
        return layer_output


class MobileViTV2Transformer(nn.Module):
    def __init__(self, config: MobileViTV2Config, n_layers: int, d_model: int) -> None:
        super().__init__()

        ffn_multiplier = config.ffn_multiplier
        ffn_dims = [ffn_multiplier * d_model] * n_layers

        # ensure that the feed-forward dims are a multiple of 16
        ffn_dims = [int((d // 16) * 16) for d in ffn_dims]

        self.layer = nn.ModuleList()
        for block_idx in range(n_layers):
            transformer_layer = MobileViTV2TransformerLayer(
                config, embed_dim=d_model, ffn_latent_dim=ffn_dims[block_idx]
            )
            self.layer.append(transformer_layer)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states)
        return hidden_states


class MobileViTV2Layer(nn.Module):
    """
    MobileViTV2 layer: https://arxiv.org/abs/2206.02680
    """

    def __init__(
        self,
        config: MobileViTV2Config,
        in_channels: int,
        out_channels: int,
        attn_unit_dim: int,
        n_attn_blocks: int = 2,
        dilation: int = 1,
        stride: int = 2,
    ) -> None:
        super().__init__()
        self.patch_width = config.patch_size
        self.patch_height = config.patch_size

        cnn_out_dim = attn_unit_dim

        if stride == 2:
            self.downsampling_layer = MobileViTV2InvertedResidual(
                config,
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride if dilation == 1 else 1,
                dilation=dilation // 2 if dilation > 1 else 1,
            )
            in_channels = out_channels
        else:
            self.downsampling_layer = None

        # local representations
        self.conv_kxk = MobileViTV2ConvLayer(
            config,
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=config.conv_kernel_size,
            groups=in_channels,
        )
        self.conv_1x1 = MobileViTV2ConvLayer(
            config,
            in_channels=in_channels,
            out_channels=cnn_out_dim,
            kernel_size=1,
            use_normalization=False,
            use_activation=False,
        )

        # global representations
        self.transformer = MobileViTV2Transformer(config, d_model=attn_unit_dim, n_layers=n_attn_blocks)

        self.layernorm = nn.GroupNorm(num_groups=1, num_channels=attn_unit_dim, eps=config.layer_norm_eps)

        # fusion
        self.conv_projection = MobileViTV2ConvLayer(
            config,
            in_channels=cnn_out_dim,
            out_channels=in_channels,
            kernel_size=1,
            use_normalization=True,
            use_activation=False,
        )

    def unfolding(self, feature_map: torch.Tensor) -> Tuple[torch.Tensor, Tuple[int, int]]:
        batch_size, in_channels, img_height, img_width = feature_map.shape
        patches = nn.functional.unfold(
            feature_map,
            kernel_size=(self.patch_height, self.patch_width),
            stride=(self.patch_height, self.patch_width),
        )
        patches = patches.reshape(batch_size, in_channels, self.patch_height * self.patch_width, -1)

        return patches, (img_height, img_width)

    def folding(self, patches: torch.Tensor, output_size: Tuple[int, int]) -> torch.Tensor:
        batch_size, in_dim, patch_size, n_patches = patches.shape
        patches = patches.reshape(batch_size, in_dim * patch_size, n_patches)

        feature_map = nn.functional.fold(
            patches,
            output_size=output_size,
            kernel_size=(self.patch_height, self.patch_width),
            stride=(self.patch_height, self.patch_width),
        )

        return feature_map

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        # reduce spatial dimensions if needed
        if self.downsampling_layer:
            features = self.downsampling_layer(features)

        # local representation
        features = self.conv_kxk(features)
        features = self.conv_1x1(features)

        # convert the feature map to patches
        patches, output_size = self.unfolding(features)

        # learn global representations
        patches = self.transformer(patches)
        patches = self.layernorm(patches)

        # convert the patches back to a feature map
        features = self.folding(patches, output_size)

        features = self.conv_projection(features)
        return features


class MobileViTV2Encoder(nn.Module):
    def __init__(self, config: MobileViTV2Config) -> None:
        super().__init__()
        self.config = config

        self.layer = nn.ModuleList()
        self.gradient_checkpointing = False

        # segmentation architectures like DeepLab and PSPNet modify the strides
        # of the classification backbone
        dilate_layer_4 = dilate_layer_5 = False
        if config.output_stride == 8:
            dilate_layer_4 = True
            dilate_layer_5 = True
        elif config.output_stride == 16:
            dilate_layer_5 = True

        dilation = 1

        layer_0_dim = make_divisible(
            clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16
        )

        layer_1_dim = make_divisible(64 * config.width_multiplier, divisor=16)
        layer_2_dim = make_divisible(128 * config.width_multiplier, divisor=8)
        layer_3_dim = make_divisible(256 * config.width_multiplier, divisor=8)
        layer_4_dim = make_divisible(384 * config.width_multiplier, divisor=8)
        layer_5_dim = make_divisible(512 * config.width_multiplier, divisor=8)

        layer_1 = MobileViTV2MobileNetLayer(
            config,
            in_channels=layer_0_dim,
            out_channels=layer_1_dim,
            stride=1,
            num_stages=1,
        )
        self.layer.append(layer_1)

        layer_2 = MobileViTV2MobileNetLayer(
            config,
            in_channels=layer_1_dim,
            out_channels=layer_2_dim,
            stride=2,
            num_stages=2,
        )
        self.layer.append(layer_2)

        layer_3 = MobileViTV2Layer(
            config,
            in_channels=layer_2_dim,
            out_channels=layer_3_dim,
            attn_unit_dim=make_divisible(config.base_attn_unit_dims[0] * config.width_multiplier, divisor=8),
            n_attn_blocks=config.n_attn_blocks[0],
        )
        self.layer.append(layer_3)

        if dilate_layer_4:
            dilation *= 2

        layer_4 = MobileViTV2Layer(
            config,
            in_channels=layer_3_dim,
            out_channels=layer_4_dim,
            attn_unit_dim=make_divisible(config.base_attn_unit_dims[1] * config.width_multiplier, divisor=8),
            n_attn_blocks=config.n_attn_blocks[1],
            dilation=dilation,
        )
        self.layer.append(layer_4)

        if dilate_layer_5:
            dilation *= 2

        layer_5 = MobileViTV2Layer(
            config,
            in_channels=layer_4_dim,
            out_channels=layer_5_dim,
            attn_unit_dim=make_divisible(config.base_attn_unit_dims[2] * config.width_multiplier, divisor=8),
            n_attn_blocks=config.n_attn_blocks[2],
            dilation=dilation,
        )
        self.layer.append(layer_5)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutputWithNoAttention]:
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            if self.gradient_checkpointing and self.training:
                hidden_states = self._gradient_checkpointing_func(layer_module.__call__, hidden_states)
            else:
                hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


@auto_docstring
class MobileViTV2PreTrainedModel(PreTrainedModel):
    config_class = MobileViTV2Config
    base_model_prefix = "mobilevitv2"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MobileViTV2Layer"]

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@auto_docstring
class MobileViTV2Model(MobileViTV2PreTrainedModel):
    def __init__(self, config: MobileViTV2Config, expand_output: bool = True):
        r"""
        expand_output (`bool`, *optional*, defaults to `True`):
            Whether to expand the output of the model. If `True`, the model will output pooled features in addition to
            hidden states. If `False`, only the hidden states will be returned.
        """
        super().__init__(config)
        self.config = config
        self.expand_output = expand_output

        layer_0_dim = make_divisible(
            clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16
        )

        self.conv_stem = MobileViTV2ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=layer_0_dim,
            kernel_size=3,
            stride=2,
            use_normalization=True,
            use_activation=True,
        )
        self.encoder = MobileViTV2Encoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel
        """
        for layer_index, heads in heads_to_prune.items():
            mobilevitv2_layer = self.encoder.layer[layer_index]
            if isinstance(mobilevitv2_layer, MobileViTV2Layer):
                for transformer_layer in mobilevitv2_layer.transformer.layer:
                    transformer_layer.attention.prune_heads(heads)

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.conv_stem(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.expand_output:
            last_hidden_state = encoder_outputs[0]

            # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels)
            pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False)
        else:
            last_hidden_state = encoder_outputs[0]
            pooled_output = None

        if not return_dict:
            output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
            return output + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@auto_docstring(
    custom_intro="""
    MobileViTV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """
)
class MobileViTV2ForImageClassification(MobileViTV2PreTrainedModel):
    def __init__(self, config: MobileViTV2Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilevitv2 = MobileViTV2Model(config)

        out_channels = make_divisible(512 * config.width_multiplier, divisor=8)  # layer 5 output dimension
        # Classifier head
        self.classifier = (
            nn.Linear(in_features=out_channels, out_features=config.num_labels)
            if config.num_labels > 0
            else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilevitv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )


class MobileViTV2ASPPPooling(nn.Module):
    def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int) -> None:
        super().__init__()

        self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)

        self.conv_1x1 = MobileViTV2ConvLayer(
            config,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            use_normalization=True,
            use_activation="relu",
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        spatial_size = features.shape[-2:]
        features = self.global_pool(features)
        features = self.conv_1x1(features)
        features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False)
        return features


class MobileViTV2ASPP(nn.Module):
    """
    ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587
    """

    def __init__(self, config: MobileViTV2Config) -> None:
        super().__init__()

        encoder_out_channels = make_divisible(512 * config.width_multiplier, divisor=8)  # layer 5 output dimension
        in_channels = encoder_out_channels
        out_channels = config.aspp_out_channels

        if len(config.atrous_rates) != 3:
            raise ValueError("Expected 3 values for atrous_rates")

        self.convs = nn.ModuleList()

        in_projection = MobileViTV2ConvLayer(
            config,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            use_activation="relu",
        )
        self.convs.append(in_projection)

        self.convs.extend(
            [
                MobileViTV2ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=3,
                    dilation=rate,
                    use_activation="relu",
                )
                for rate in config.atrous_rates
            ]
        )

        pool_layer = MobileViTV2ASPPPooling(config, in_channels, out_channels)
        self.convs.append(pool_layer)

        self.project = MobileViTV2ConvLayer(
            config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu"
        )

        self.dropout = nn.Dropout(p=config.aspp_dropout_prob)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        pyramid = []
        for conv in self.convs:
            pyramid.append(conv(features))
        pyramid = torch.cat(pyramid, dim=1)

        pooled_features = self.project(pyramid)
        pooled_features = self.dropout(pooled_features)
        return pooled_features


class MobileViTV2DeepLabV3(nn.Module):
    """
    DeepLabv3 architecture: https://arxiv.org/abs/1706.05587
    """

    def __init__(self, config: MobileViTV2Config) -> None:
        super().__init__()
        self.aspp = MobileViTV2ASPP(config)

        self.dropout = nn.Dropout2d(config.classifier_dropout_prob)

        self.classifier = MobileViTV2ConvLayer(
            config,
            in_channels=config.aspp_out_channels,
            out_channels=config.num_labels,
            kernel_size=1,
            use_normalization=False,
            use_activation=False,
            bias=True,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        features = self.aspp(hidden_states[-1])
        features = self.dropout(features)
        features = self.classifier(features)
        return features


@auto_docstring(
    custom_intro="""
    MobileViTV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.
    """
)
class MobileViTV2ForSemanticSegmentation(MobileViTV2PreTrainedModel):
    def __init__(self, config: MobileViTV2Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilevitv2 = MobileViTV2Model(config, expand_output=False)
        self.segmentation_head = MobileViTV2DeepLabV3(config)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image
        >>> from transformers import AutoImageProcessor, MobileViTV2ForSemanticSegmentation

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
        >>> model = MobileViTV2ForSemanticSegmentation.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")

        >>> inputs = image_processor(images=image, return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> # logits are of shape (batch_size, num_labels, height, width)
        >>> logits = outputs.logits
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None and self.config.num_labels == 1:
            raise ValueError("The number of labels should be greater than one")

        outputs = self.mobilevitv2(
            pixel_values,
            output_hidden_states=True,  # we need the intermediate hidden states
            return_dict=return_dict,
        )

        encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]

        logits = self.segmentation_head(encoder_hidden_states)

        loss = None
        if labels is not None:
            # upsample logits to the images' original size
            upsampled_logits = nn.functional.interpolate(
                logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
            )
            loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
            loss = loss_fct(upsampled_logits, labels)

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )


__all__ = [
    "MobileViTV2ForImageClassification",
    "MobileViTV2ForSemanticSegmentation",
    "MobileViTV2Model",
    "MobileViTV2PreTrainedModel",
]