"""PyTorch ViTDet backbone."""

import collections.abc
import math
from typing import Dict, List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_outputs import BackboneOutput, BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ...utils.backbone_utils import BackboneMixin
from .configuration_vitdet import VitDetConfig


logger = logging.get_logger(__name__)


class VitDetEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) to be consumed by a Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.pretrain_image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches

        if config.use_absolute_position_embeddings:
            # Initialize absolute positional embedding with pretrain image size.
            num_positions = num_patches + 1
            self.position_embeddings = nn.Parameter(torch.zeros(1, num_positions, config.hidden_size))
        else:
            self.position_embeddings = None

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def get_absolute_positions(self, abs_pos_embeddings, has_cls_token, height, width):
        """
        Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token dimension for the
        original embeddings.

        Args:
            abs_pos_embeddings (`torch.Tensor`):
                Absolute positional embeddings with (1, num_position, num_channels).
            has_cls_token (`bool`):
                If true, has 1 embedding in abs_pos_embeddings for cls token.
            height (`int`):
                Height of input image tokens.
            width (`int`):
                Width of input image tokens.

        Returns:
            Absolute positional embeddings after processing with shape (1, height, width, num_channels)
        """
        if has_cls_token:
            abs_pos_embeddings = abs_pos_embeddings[:, 1:]
        num_position = abs_pos_embeddings.shape[1]
        size = int(math.sqrt(num_position))
        if size * size != num_position:
            raise ValueError("Absolute position embeddings must be a square number.")

        if torch.jit.is_tracing() or (size != height or size != width):
            new_abs_pos_embeddings = nn.functional.interpolate(
                abs_pos_embeddings.reshape(1, size, size, -1).permute(0, 3, 1, 2),
                size=(height, width),
                mode="bicubic",
                align_corners=False,
            )
            return new_abs_pos_embeddings.permute(0, 2, 3, 1)
        else:
            return abs_pos_embeddings.reshape(1, height, width, -1)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the"
                f" configuration. Expected {self.num_channels} but got {num_channels}."
            )
        embeddings = self.projection(pixel_values)

        if self.position_embeddings is not None:
            # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
            embeddings = embeddings.permute(0, 2, 3, 1)
            # add position embeddings
            embeddings = embeddings + self.get_absolute_positions(
                self.position_embeddings, True, embeddings.shape[1], embeddings.shape[2]
            )
            # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
            embeddings = embeddings.permute(0, 3, 1, 2)

        return embeddings


@torch.jit.script_if_tracing
def get_rel_pos(q_size, k_size, rel_pos):
    """
    Get relative positional embeddings according to the relative positions of query and key sizes.

    Args:
        q_size (`int`):
            Size of query q.
        k_size (`int`):
            Size of key k.
        rel_pos (`torch.Tensor`):
            Relative position embeddings (num_embeddings, num_channels).

    Returns:
        Extracted positional embeddings according to relative positions.
    """
    max_rel_dist = int(2 * max(q_size, k_size) - 1)
    if rel_pos.shape[0] != max_rel_dist:
        # Interpolate rel position embeddings to the required length.
        rel_pos_resized = nn.functional.interpolate(
            rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
            size=max_rel_dist,
            mode="linear",
        )
        rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
    else:
        rel_pos_resized = rel_pos

    # Scale the coords with short length if shapes for q and k are different.
    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
    relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)

    return rel_pos_resized[relative_coords.long()]


def add_decomposed_relative_positions(attn, queries, rel_pos_h, rel_pos_w, q_size, k_size):
    """
    Calculate decomposed Relative Positional Embeddings as introduced in
    [MViT2](https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py).

    Args:
        attn (`torch.Tensor`):
            Attention map.
        queries (`torch.Tensor`):
            Query q in the attention layer with shape (batch_size, queries_height * queries_width, num_channels).
        rel_pos_h (`torch.Tensor`):
            Relative position embeddings (Lh, num_channels) for height axis.
        rel_pos_w (`torch.Tensor`):
            Relative position embeddings (Lw, num_channels) for width axis.
        q_size (`Tuple[int]`):
            Spatial sequence size of query q with (queries_height, queries_width).
        k_size (`Tuple[int]`):
            Spatial sequence size of key k with (keys_height, keys_width).

    Returns:
        attn (Tensor): attention map with added relative positional embeddings.
    """
    queries_height, queries_width = q_size
    keys_height, keys_width = k_size
    relative_height = get_rel_pos(queries_height, keys_height, rel_pos_h)
    relative_width = get_rel_pos(queries_width, keys_width, rel_pos_w)

    batch_size, _, dim = queries.shape
    r_q = queries.reshape(batch_size, queries_height, queries_width, dim)
    relative_height = torch.einsum("bhwc,hkc->bhwk", r_q, relative_height)
    relative_weight = torch.einsum("bhwc,wkc->bhwk", r_q, relative_width)

    attn = (
        attn.view(batch_size, queries_height, queries_width, keys_height, keys_width)
        + relative_height[:, :, :, :, None]
        + relative_weight[:, :, :, None, :]
    ).view(batch_size, queries_height * queries_width, keys_height * keys_width)

    return attn
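

# Worked shape example for the two helpers above (an addition, not part of the original file):
# for a 14x14 query/key grid, the relative offset along one axis can take 2 * 14 - 1 = 27
# values, so `get_rel_pos` indexes a (27, head_dim) table into a (14, 14, head_dim) tensor,
# and `add_decomposed_relative_positions` adds one such term per axis to the attention map.
#
#   >>> rel_pos = torch.randn(27, 64)  # (2 * 14 - 1, head_dim)
#   >>> get_rel_pos(14, 14, rel_pos).shape
#   torch.Size([14, 14, 64])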


class VitDetAttention(nn.Module):
    """Multi-head Attention block with relative position embeddings."""

    def __init__(self, config, input_size=None):
        """
        Args:
            config (`VitDetConfig`):
                Model configuration.
            input_size (`Tuple[int]`, *optional*):
                Input resolution, only required in case relative position embeddings are added.
        """
        super().__init__()

        dim = config.hidden_size
        num_heads = config.num_attention_heads

        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias)
        self.proj = nn.Linear(dim, dim)

        self.use_relative_position_embeddings = config.use_relative_position_embeddings
        if self.use_relative_position_embeddings:
            # initialize relative positional embeddings
            self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
            self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))

    def forward(self, hidden_state, output_attentions=False):
        batch_size, height, width, _ = hidden_state.shape
        # qkv with shape (3, batch_size, num_heads, height * width, num_channels)
        qkv = self.qkv(hidden_state).reshape(batch_size, height * width, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        # queries, keys and values have shape (batch_size * num_heads, height * width, num_channels)
        qkv = qkv.reshape(3, batch_size * self.num_heads, height * width, -1)
        queries, keys, values = qkv.unbind(0)

        attention_scores = (queries * self.scale) @ keys.transpose(-2, -1)

        if self.use_relative_position_embeddings:
            attention_scores = add_decomposed_relative_positions(
                attention_scores, queries, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
            )

        attention_probs = attention_scores.softmax(dim=-1)

        hidden_state = attention_probs @ values
        hidden_state = hidden_state.view(batch_size, self.num_heads, height, width, -1)
        hidden_state = hidden_state.permute(0, 2, 3, 1, 4)
        hidden_state = hidden_state.reshape(batch_size, height, width, -1)
        hidden_state = self.proj(hidden_state)

        if output_attentions:
            attention_probs = attention_probs.reshape(
                batch_size, self.num_heads, attention_probs.shape[-2], attention_probs.shape[-1]
            )
            outputs = (hidden_state, attention_probs)
        else:
            outputs = (hidden_state,)

        return outputs


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class VitDetDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
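

# Behavior sketch for VitDetDropPath (an addition, not part of the original file): outside
# training the layer is the identity; during training each sample in the batch is either
# zeroed entirely or rescaled by 1 / keep_prob, which keeps the expected value unchanged.
#
#   >>> layer = VitDetDropPath(drop_prob=0.5).eval()
#   >>> x = torch.randn(8, 196, 768)
#   >>> torch.equal(layer(x), x)  # identity when not training
#   True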


class VitDetLayerNorm(nn.Module):
    """
    A LayerNorm variant, popularized by Transformers, that performs point-wise mean and variance normalization over the
    channel dimension for inputs that have shape (batch_size, channels, height, width).
    https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119
    """

    def __init__(self, normalized_shape, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.normalized_shape = (normalized_shape,)

    def forward(self, x):
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x


class VitDetResBottleneckBlock(nn.Module):
    """
    The standard bottleneck residual block without the last activation layer. It contains 3 conv layers with kernels
    1x1, 3x3, 1x1.
    """

    def __init__(self, config, in_channels, out_channels, bottleneck_channels):
        """
        Args:
            config (`VitDetConfig`):
                Model configuration.
            in_channels (`int`):
                Number of input channels.
            out_channels (`int`):
                Number of output channels.
            bottleneck_channels (`int`):
                Number of output channels for the 3x3 "bottleneck" conv layers.
        """
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, 1, bias=False)
        self.norm1 = VitDetLayerNorm(bottleneck_channels)
        self.act1 = ACT2FN[config.hidden_act]

        self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, 3, padding=1, bias=False)
        self.norm2 = VitDetLayerNorm(bottleneck_channels)
        self.act2 = ACT2FN[config.hidden_act]

        self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, 1, bias=False)
        self.norm3 = VitDetLayerNorm(out_channels)

    def forward(self, x):
        out = x
        for layer in self.children():
            out = layer(out)

        out = x + out
        return out


class VitDetMlp(nn.Module):
    def __init__(self, config, in_features: int, hidden_features: int) -> None:
        super().__init__()
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = ACT2FN[config.hidden_act]
        self.fc2 = nn.Linear(hidden_features, in_features)
        self.drop = nn.Dropout(config.dropout_prob)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def window_partition(hidden_state, window_size):
    """
    Partition into non-overlapping windows with padding if needed.

    Args:
        hidden_state (`torch.Tensor`):
            Input tokens with [batch_size, height, width, num_channels].
        window_size (`int`):
            Window size.

    Returns:
        `tuple(torch.FloatTensor)` comprising various elements:
        - windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels].
        - (padded_height, padded_width): padded height and width before partition
    """
    batch_size, height, width, num_channels = hidden_state.shape

    pad_height = (window_size - height % window_size) % window_size
    pad_width = (window_size - width % window_size) % window_size
    hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height))
    padded_height, padded_width = height + pad_height, width + pad_width

    hidden_state = hidden_state.view(
        batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels
    )
    windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
    return windows, (padded_height, padded_width)


def window_unpartition(windows, window_size, pad_height_width, height_width):
    """
    Window unpartition into original sequences and removing padding.

    Args:
        windows (`torch.Tensor`):
            Input tokens with [batch_size * num_windows, window_size, window_size, num_channels].
        window_size (`int`):
            Window size.
        pad_height_width (`Tuple[int]`):
            Padded height and width (padded_height, padded_width).
        height_width (`Tuple[int]`):
            Original height and width before padding.

    Returns:
        hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels].
    """
    padded_height, padded_width = pad_height_width
    height, width = height_width
    batch_size = windows.shape[0] // (padded_height * padded_width // window_size // window_size)
    hidden_state = windows.view(
        batch_size, padded_height // window_size, padded_width // window_size, window_size, window_size, -1
    )
    hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous()
    hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1)

    hidden_state = hidden_state[:, :height, :width, :].contiguous()
    return hidden_state
 fd
dZ			dde	j
dee	j
 dedeee	j
e	j
f ee	j
 f fddZ  ZS )VitDetLayerzCThis corresponds to the Block class in the original implementation.r   Fr%   drop_path_rater   use_residual_blockr=   Nc           	         s  t    |j}|j}t|ttfr|n||f}|j}t|ttfr$|n||f}|d |d  |d |d  f}tj	||j
d| _t||dkrI|n||fd| _|dkrXt|nt | _tj	||j
d| _t||t||j d| _|| _|| _| jrt||||d d| _d S d S )	Nr   r   )r|   )r^   rh   )r%   r   r   r+   )r%   r   r   r   )r   r   r   r   r   listtupler   r   	LayerNormZlayer_norm_epsr   rU   	attentionrr   ZIdentityrq   r   r   r/   Z	mlp_ratiomlpr   r   r   residual)	r$   r%   r   r   r   rS   r   r   r^   r&   r(   r)   r     s0   
 zVitDetLayer.__init__rs   	head_maskrb   c           
      C   s   | dddd}|}| |}| jdkr'|jd |jd }}t|| j\}}| j||d}|d }|dd  }	| jdkrGt|| j|||f}|| | }|| | | 	| }| dddd}| j
rk| |}|f|	 }	|	S )Nr   r+   r   r   )rb   )r8   r   r   r.   r   r   r   rq   r   r   r   r   )
r$   rs   r   rb   Zshortcutr9   r:   r   Zself_attention_outputsre   r(   r(   r)   r@     s*   




zVitDetLayer.forward)r   r   F)NF)rA   rB   rC   rD   r   rw   r/   boolr   r   rE   r   r   r   r@   rF   r(   r(   r&   r)   r     s2    &r   c                       sb   e Zd Zdeddf fddZ				ddejd	eej d
ededede	e
ef fddZ  ZS )VitDetEncoderr%   r=   Nc              	      s   t    || _|j}dd tjd|j|ddD }g }t|D ]}|t	||| ||j
v r1|jnd||jv d q t|| _d| _d S )Nc                 S   s   g | ]}|  qS r(   )item).0r   r(   r(   r)   


class VitDetEncoder(nn.Module):
    def __init__(self, config: VitDetConfig) -> None:
        super().__init__()
        self.config = config
        depth = config.num_hidden_layers

        # stochastic depth decay rule
        drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, depth, device="cpu")]

        layers = []
        for i in range(depth):
            layers.append(
                VitDetLayer(
                    config,
                    drop_path_rate=drop_path_rate[i],
                    window_size=config.window_size if i in config.window_block_indices else 0,
                    use_residual_block=i in config.residual_block_indices,
                )
            )

        self.layer = nn.ModuleList(layers)
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


def caffe2_msra_fill(module: nn.Conv2d) -> None:
    """
    Initialize `module.weight` using the "MSRAFill" implemented in Caffe2. Also initializes `module.bias` to 0.

    Source: https://detectron2.readthedocs.io/en/latest/_modules/fvcore/nn/weight_init.html.

    Args:
        module (torch.nn.Module): module to initialize.
    """
    nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
    if module.bias is not None:
        nn.init.constant_(module.bias, 0)
r   c                   @   sB   e Zd ZeZdZdZdZg Zde	e
je
je
jf ddfddZdS )	VitDetPreTrainedModelZvitdetr<   Tr   r=   Nc                 C   s  t |tjtjfr0tjj|jjt	j
d| jjd|jj|j_|jdur.|jj  dS dS t |tjrE|jj  |jjd dS t |trdtjj|jjt	j
d| jjd|jj|j_dS t |tr| jjrtjj|jjt	j
d| jjd|j_tjj|jjt	j
d| jjd|j_dS t |tr|j|j|jfD ]}t| q|j|jfD ]}|jjd |jj  q|jjj  |jjj  dS dS )zInitialize the weightsrh   )r~   ZstdNrG   ) r   r   rZ   r"   r   Ztrunc_normal_r{   datator   Zfloat32r%   Zinitializer_rangerl   rW   Zzero_r   Zfill_r   r!   rU   r]   rO   rP   r   r   r   r   r   r   r   r   )r$   r   r   r(   r(   r)   _init_weightsd  sP   







z#VitDetPreTrainedModel._init_weights)rA   rB   rC   r   Zconfig_classZbase_model_prefixZmain_input_nameZsupports_gradient_checkpointingZ_no_split_modulesr   r   rZ   r"   r   r   r(   r(   r(   r)   r   \  s    &r   c                       s   e Zd Zdef fddZdefddZdeee	e f ddfd	d


@auto_docstring
class VitDetModel(VitDetPreTrainedModel):
    def __init__(self, config: VitDetConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = VitDetEmbeddings(config)
        self.encoder = VitDetEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> VitDetEmbeddings:
        return self.embeddings.projection

    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Examples:

        ```python
        >>> from transformers import VitDetConfig, VitDetModel
        >>> import torch

        >>> config = VitDetConfig()
        >>> model = VitDetModel(config)

        >>> pixel_values = torch.randn(1, 3, 224, 224)

        >>> with torch.no_grad():
        ...     outputs = model(pixel_values)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 768, 14, 14]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed: 1.0 in head_mask indicates we keep the head.
        # Input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] and is
        # converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length].
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    ViTDet backbone, to be used with frameworks like Mask R-CNN.
    """
)
class VitDetBackbone(VitDetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.embeddings = VitDetEmbeddings(config)
        self.encoder = VitDetEncoder(config)
        self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> VitDetEmbeddings:
        return self.embeddings.projection

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        r"""
        Examples:

        ```python
        >>> from transformers import VitDetConfig, VitDetBackbone
        >>> import torch

        >>> config = VitDetConfig()
        >>> model = VitDetBackbone(config)

        >>> pixel_values = torch.randn(1, 3, 224, 224)

        >>> with torch.no_grad():
        ...     outputs = model(pixel_values)

        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 768, 14, 14]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        embedding_output = self.embeddings(pixel_values)

        outputs = self.encoder(
            embedding_output,
            output_hidden_states=True,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        hidden_states = outputs.hidden_states if return_dict else outputs[1]

        feature_maps = ()
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                feature_maps += (hidden_state,)

        if not return_dict:
            if output_hidden_states:
                output = (feature_maps,) + outputs[1:]
            else:
                output = (feature_maps,) + outputs[2:]
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )


__all__ = ["VitDetModel", "VitDetPreTrainedModel", "VitDetBackbone"]