"""PyTorch GIT model."""

import math
from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPast,
    BaseModelOutputWithPooling,
    CausalLMOutputWithPast,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from .configuration_git import GitConfig, GitVisionConfig


logger = logging.get_logger(__name__)


@dataclass
class GitVisionModelOutput(ModelOutput):
    r"""
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


class GitEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if inputs_embeds is None:
            embeddings = self.word_embeddings(input_ids)
        else:
            embeddings = inputs_embeds

        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class GitSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None, layer_idx=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a "
                "`layer_idx` when creating this class."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1)
        if config.num_image_with_embedding is not None:
            self.image_patch_tokens *= config.num_image_with_embedding

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        pixel_values_present: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        cutoff = self.image_patch_tokens if pixel_values_present else 0
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        if past_key_value is not None:
            # only the text part of the key/value states is cached; the image tokens are recomputed at every step
            key_layer_past, value_layer_past = past_key_value.update(
                key_layer[:, :, cutoff:, :], value_layer[:, :, cutoff:, :], self.layer_idx
            )
            key_layer = torch.cat([key_layer[:, :, :cutoff, :], key_layer_past], dim=2)
            value_layer = torch.cat([value_layer[:, :, :cutoff, :], value_layer_past], dim=2)

        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if past_key_value is not None:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in GitModel's forward)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # Dropout is applied to whole tokens to attend to, as in the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if requested
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        outputs = outputs + (past_key_value,)
        return outputs
zGitSelfAttention.forwardNNNNNFF)r$   r%   r&   r8   r(   rW   rt   r   r)   r   boolr   rU   rX   r+   r+   rK   r,   rY      s.    "rY   c                       8   e Zd Z fddZdejdejdejfddZ  ZS )GitSelfOutputc                    sB   t    t|j|j| _tj|j|jd| _t|j	| _
d S Nr/   )r7   r8   r   rk   r;   denser?   r@   rA   rB   rC   rH   rK   r+   r,   r8         
zGitSelfOutput.__init__r"   input_tensorrP   c                 C   &   |  |}| |}| || }|S Nr   rC   r?   rI   r"   r   r+   r+   r,   rU         

zGitSelfOutput.forwardr$   r%   r&   r8   r(   rW   rU   rX   r+   r+   rK   r,   r          $r   eagerc                       sx   e Zd Zd fdd	Zdd Z					ddejdeej d	eej d
ee	 dee
 dee
 deej fddZ  ZS )GitAttentionNc                    s6   t    t|j |||d| _t|| _t | _d S )N)r0   ra   )	r7   r8   GIT_SELF_ATTENTION_CLASSES_attn_implementationrI   r   outputsetpruned_headsrp   rK   r+   r,   r8   
  s   

zGitAttention.__init__c                 C   s   t |dkrd S t|| jj| jj| j\}}t| jj|| j_t| jj|| j_t| jj	|| j_	t| j
j|dd| j
_| jjt | | j_| jj| jj | j_| j|| _d S )Nr   r   rz   )lenr   rI   r^   rd   r   r   rl   rm   rn   r   r   re   union)rI   headsindexr+   r+   r,   prune_heads  s   zGitAttention.prune_headsFr"   ru   rv   rw   rx   ry   rP   c           
      C   s:   |  ||||||}| |d |}|f|dd   }	|	S )Nr   r   )rI   r   )
rI   r"   ru   rv   rw   rx   ry   Zself_outputsattention_outputr   r+   r+   r,   rU   %  s   	zGitAttention.forwardr   r   )r$   r%   r&   r8   r   r(   rW   r   r)   r   r   r   rU   rX   r+   r+   rK   r,   r   	  s.    	r   c                       2   e Zd Z fddZdejdejfddZ  ZS )GitIntermediatec                    sD   t    t|j|j| _t|jt	rt
|j | _d S |j| _d S r   )r7   r8   r   rk   r;   intermediate_sizer   
isinstance
hidden_actstrr
   intermediate_act_fnrH   rK   r+   r,   r8   =  s
   
zGitIntermediate.__init__r"   rP   c                 C   s   |  |}| |}|S r   )r   r   rI   r"   r+   r+   r,   rU   E  s   

zGitIntermediate.forwardr   r+   r+   rK   r,   r   <  s    r   c                       r   )	GitOutputc                    sB   t    t|j|j| _tj|j|jd| _t	|j
| _d S r   )r7   r8   r   rk   r   r;   r   r?   r@   rA   rB   rC   rH   rK   r+   r,   r8   M  r   zGitOutput.__init__r"   r   rP   c                 C   r   r   r   r   r+   r+   r,   rU   S  r   zGitOutput.forwardr   r+   r+   rK   r,   r   L  r   r   c                       sx   e Zd Zd fdd	Z					ddejdeej deej dee d	ee	 d
ee	 de
ej fddZdd Z  ZS )GitLayerNc                    s>   t    |j| _d| _t||d| _t|| _t|| _	d S )Nr   )ra   )
r7   r8   chunk_size_feed_forwardseq_len_dimr   	attentionr   intermediater   r   )rI   rJ   ra   rK   r+   r,   r8   [  s   

zGitLayer.__init__Fr"   ru   rv   rw   rx   ry   rP   c                 C   s^   | j ||||||d}|d }|dd }	|d }
t| j| j| j|}|f|	 }	|	|
f }	|	S )N)rx   rw   ry   r   r   r4   )r   r   feed_forward_chunkr   r   )rI   r"   ru   rv   rw   rx   ry   Zself_attention_outputsr   r   Zpresent_key_valuelayer_outputr+   r+   r,   rU   c  s"   


zGitLayer.forwardc                 C   s   |  |}| ||}|S r   )r   r   )rI   r   Zintermediate_outputr   r+   r+   r,   r     s   
zGitLayer.feed_forward_chunkr   r   )r$   r%   r&   r8   r(   rW   r   r)   r   r   r   rU   r   rX   r+   r+   rK   r,   r   Z  s.    
"r   c                       s   e Zd Z fddZ								ddejdeej deej d	eee	e
e
ej  f  d
ee dee dee dee dee dee
ej ef fddZ  ZS )
GitEncoderc                    :   t     | _t fddt jD | _d| _d S )Nc                    s   g | ]}t  |qS r+   )r   ).0irJ   r+   r,   
<listcomp>  s    z'GitEncoder.__init__.<locals>.<listcomp>F)	r7   r8   rJ   r   
ModuleListrangenum_hidden_layerslayergradient_checkpointingrH   rK   r   r,   r8        
 
zGitEncoder.__init__NFTr"   ru   rv   past_key_values	use_cacherx   output_hidden_statesry   return_dictrP   c
              	   C   sZ  | j r| jr|rtd d}d}
|r,t|ts,d}
|d u r"t }n
t|}td |r0dnd }|r6dnd }d }t| j	D ]C\}}|rJ||f }|d urR|| nd }| j rf| jrf| 
|j|||||}n	|||||||}|d }|ry|d }|r||d f }q?|r||f }|r|nd }|
r| }|	std	d
 ||||fD S t||||dS )NzZ`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...FTzWe detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class (https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)r+   r   r4   r   c                 s       | ]	}|d ur|V  qd S r   r+   r   vr+   r+   r,   	<genexpr>  s    z%GitEncoder.forward.<locals>.<genexpr>r!   r   r"   r#   )r   trainingrb   rc   r   r   r   Zfrom_legacy_cache	enumerater   _gradient_checkpointing_func__call__Zto_legacy_cachetupler   )rI   r"   ru   rv   r   r   rx   r   ry   r   Zreturn_legacy_cacheZall_hidden_statesZall_self_attentionsZnext_decoder_cacher   Zlayer_moduleZlayer_head_masklayer_outputsZ
next_cacher+   r+   r,   rU     s   

		

zGitEncoder.forward)NNNNFFFT)r$   r%   r&   r8   r(   rW   r   r)   r   r   r   r   r   rU   rX   r+   r+   rK   r,   r     s>    		
r   c                   @   s(   e Zd ZeZdZdZdZdZdd Z	dS )GitPreTrainedModelgitTc                 C   s  t |tr)tjj|jd| jjd tjj|jj	| jjd tjj|j
j	| jjd t |tjrI|j	jjd| jjd |jdurG|jj  dS dS t |tjrl|j	jjd| jjd |jdurj|j	j|j   dS dS t |tjr|jj  |j	jd dS dS )zInitialize the weights        )meanstd)r   Ng      ?)r   GitVisionEmbeddingsr   initZnormal_class_embeddingrJ   Zinitializer_rangepatch_embeddingweightposition_embeddingrk   databiasZzero_r9   r.   r?   Zfill_)rI   moduler+   r+   r,   _init_weights  s$   


z GitPreTrainedModel._init_weightsN)
r$   r%   r&   r   config_classZbase_model_prefixZsupports_gradient_checkpointingZ_supports_cache_classZ_supports_quantized_cacher   r+   r+   r+   r,   r     s    r   c                       sX   e Zd Zdef fddZdejdededejfdd	Zddej	dejfddZ
  ZS )r   rJ   c                    s   t    || _|j| _|j| _|j| _tt	
| j| _tj|j| j| j| jdd| _| j| j d | _| jd | _t| j| j| _| jdt	| jddd d S )NF)Zin_channelsZout_channelsZkernel_sizeZstrider   r[   r   r2   r3   r5   )r7   r8   rJ   r;   	embed_dimrg   rh   r   	Parameterr(   Zrandnr   ZConv2dZnum_channelsr   num_patchesnum_positionsr9   r   rE   rF   rG   rH   rK   r+   r,   r8     s"   
"zGitVisionEmbeddings.__init__rT   heightwidthrP   c                 C   s  |j d d }| jjd}|j d d }tj s(||kr(||kr(| | jS |ddddf }|ddddf }|j d }	|| j }
|| j }t	|d }|
d|||	}|dddd}tjj||
|fdd	d
}|dddddd|	}tj||fddS )a   
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1] - 1
        position_embedding = self.position_embedding.weight.unsqueeze(0)
        num_positions = position_embedding.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding(self.position_ids)

        class_pos_embed = position_embedding[:, :1]
        patch_pos_embed = position_embedding[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings


class GitVisionMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class GitVisionAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, seq_length, embed_dim = hidden_states.shape

        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)

        queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

        # in case a flash-attention kernel is used, merge the two masks; otherwise infer `is_causal`
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and causal_attention_mask is not None:
                attention_mask = attention_mask + causal_attention_mask
            elif causal_attention_mask is not None:
                attention_mask = causal_attention_mask
        else:
            self.is_causal = causal_attention_mask is not None

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and output_attentions:
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. "
                    "Falling back to eager attention. This warning can be removed using the argument "
                    '`attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=0.0 if not self.training else self.dropout,
        )

        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None
        return attn_output, attn_weights


class GitVisionEncoderLayer(nn.Module):
    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = GitVisionAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = GitVisionMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class GitVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`GitVisionEncoderLayer`].

    Args:
        config: GitVisionConfig
    """

    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class GitVisionTransformer(nn.Module):
    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = GitVisionEmbeddings(config)
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = GitVisionEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]

        last_hidden_state = self.post_layernorm(last_hidden_state)

        if not return_dict:
            return (last_hidden_state,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=last_hidden_state,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@auto_docstring(
    custom_intro="""
    The vision model from CLIP, used in GIT, without any head or projection on top.
    """
)
class GitVisionModel(GitPreTrainedModel):
    config_class = GitVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: GitVisionConfig):
        super().__init__(config)
        self.vision_model = GitVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, GitVisionModel

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
        >>> model = GitVisionModel.from_pretrained("microsoft/git-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )


class GitProjection(nn.Module):
    def __init__(self, config: GitConfig):
        super().__init__()
        self.config = config
        self.visual_projection = nn.Sequential(
            nn.Linear(config.vision_config.hidden_size, config.hidden_size),
            nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps),
        )

    def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
        return self.visual_projection(embeddings)
@auto_docstring(
    custom_intro="""
    The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states
    """
)
class GitModel(GitPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = GitEmbeddings(config)
        self.image_encoder = GitVisionModel(config.vision_config)
        self.encoder = GitEncoder(config)

        self.visual_projection = GitProjection(config)

        if config.num_image_with_embedding is not None:
            self.img_temperal_embedding = nn.ParameterList(
                nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size))
                for _ in range(config.num_image_with_embedding)
            )

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
        # Default mask is for forward direction. Flip for backward direction.
        mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1)
        mask = mask.masked_fill(mask == 1, float("-inf"))
        return mask

    def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None):
        num_tgt = tgt.shape[1]
        num_memory = memory.shape[1]
        device = tgt.device
        dtype = tgt.dtype
        top_left = torch.zeros((num_memory, num_memory), dtype=dtype, device=device)
        top_right = torch.full(
            (num_memory, num_tgt + past_key_values_length),
            float("-inf"),
            device=tgt.device,
            dtype=dtype,
        )
        bottom_left = torch.zeros(
            (num_tgt, num_memory),
            dtype=dtype,
            device=tgt_mask.device,
        )

        if past_key_values_length > 0:
            tgt_mask = torch.zeros(
                (tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length),
                dtype=dtype,
                device=tgt_mask.device,
            )

        left = torch.cat((top_left, bottom_left), dim=0)
        right = torch.cat((top_right, tgt_mask), dim=0)

        full_attention_mask = torch.cat((left, right), dim=1)[None, :]

        if memory_key_padding_mask is None:
            memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)

        # if it is False, it means valid, i.e. it is not a padding position
        if memory_key_padding_mask.dtype != torch.bool:
            raise ValueError("Memory key padding mask must be a boolean tensor.")
        zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
        zero_negative_infinity[memory_key_padding_mask] = float("-inf")
        full_attention_mask = full_attention_mask.expand(
            (memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt)
        )
        full_attention_mask = full_attention_mask.clone()
        origin_left = full_attention_mask[:, :, :num_memory]
        update = zero_negative_infinity[:, None, :]
        full_attention_mask[:, :, :num_memory] = origin_left + update

        # add axis for the multi-head dimension
        full_attention_mask = full_attention_mask[:, None, :, :]

        return full_attention_mask

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoProcessor, AutoModel
        >>> import requests
        >>> from PIL import Image

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
        >>> model = AutoModel.from_pretrained("microsoft/git-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> text = "this is an image of two cats"

        >>> inputs = processor(images=image, text=text, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        seq_length = input_shape[1]

        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = (
                past_key_values[0][0].shape[2]
                if not isinstance(past_key_values, Cache)
                else past_key_values.get_seq_length()
            )

        # Prepare head mask if needed; 1.0 in head_mask indicates we keep the head
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        projected_visual_features = None
        if pixel_values is not None:
            if pixel_values.ndim == 4:
                # here we assume pixel_values is of shape (batch_size, num_channels, height, width)
                visual_features = self.image_encoder(
                    pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
                ).last_hidden_state

            elif pixel_values.ndim == 5:
                # here we assume pixel_values is of shape (batch_size, num_frames, num_channels, height, width)
                visual_features = []
                for frame_idx in range(pixel_values.shape[1]):
                    visual_features_frame = self.image_encoder(
                        pixel_values[:, frame_idx, :, :], interpolate_pos_encoding=interpolate_pos_encoding
                    ).last_hidden_state
                    visual_features_frame += self.img_temperal_embedding[frame_idx]
                    visual_features.append(visual_features_frame)

                # finally, concatenate all features along the sequence dimension
                visual_features = torch.cat(visual_features, dim=1)

            else:
                raise ValueError("pixel_values must be of rank 4 or 5")

            projected_visual_features = self.visual_projection(visual_features)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        if projected_visual_features is None:
            projected_visual_features = torch.zeros(
                (embedding_output.shape[0], 0, embedding_output.shape[2]),
                dtype=embedding_output.dtype,
                device=embedding_output.device,
            )

        # Repeat visual features to match the embedding batch size.
        projected_visual_features = projected_visual_features.repeat(
            embedding_output.size(0) // projected_visual_features.size(0), 1, 1
        )

        # concatenate patch token and text token embeddings
        hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1)

        # By default, an additive causal mask is created for masking the future (one direction).
        tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device)

        # Create an attention mask of shape (batch_size, 1, tgt_seq_len, src_seq_len)
        combined_attention_mask = self.create_attention_mask(
            tgt=embedding_output,
            memory=projected_visual_features,
            tgt_mask=tgt_mask,
            past_key_values_length=past_key_values_length,
        )

        if attention_mask is not None:
            # if the user provides an attention mask, we add it to the default one
            # expanded_attn_mask + combined_attention_mask can cause some overflow
            expanded_attn_mask = _prepare_4d_attention_mask(
                attention_mask, embedding_output.dtype, tgt_len=input_shape[-1]
            ).to(embedding_output.device)
            if past_key_values_length > 0:
                expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :]
            else:
                combined_attention_mask[:, :, -input_shape[-1] :, -input_shape[-1] :] += expanded_attn_mask

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=combined_attention_mask,
            head_mask=head_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            pixel_values_present=pixel_values is not None,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutputWithPast(
            last_hidden_state=sequence_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    GIT Model with a `language modeling` head on top for autoregressive language modeling.
    """
)
class GitForCausalLM(GitPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["output.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.git = GitModel(config)
        self.output = nn.Linear(config.hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.output

    def set_output_embeddings(self, new_embeddings):
        self.output = new_embeddings

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`

        Examples:

        Image captioning example:

        ```python
        >>> from transformers import AutoProcessor, AutoModelForCausalLM
        >>> import requests
        >>> from PIL import Image

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-coco")
        >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values

        >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
        >>> generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> print(generated_caption)
        two cats sleeping on a pink blanket next to remotes.
        ```

        Visual question answering (VQA) example:

        ```python
        >>> from transformers import AutoProcessor, AutoModelForCausalLM
        >>> from huggingface_hub import hf_hub_download
        >>> from PIL import Image

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-textvqa")
        >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-textvqa")

        >>> file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
        >>> image = Image.open(file_path).convert("RGB")

        >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values

        >>> question = "what does the front of the bus say at the top?"

        >>> input_ids = processor(text=question, add_special_tokens=False).input_ids
        >>> input_ids = [processor.tokenizer.cls_token_id] + input_ids
        >>> input_ids = torch.tensor(input_ids).unsqueeze(0)

        >>> generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
        >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True))
        ['what does the front of the bus say at the top? special']
        ```

        Video captioning example:

        ```python
        >>> import av
        >>> import numpy as np
        >>> from PIL import Image
        >>> from huggingface_hub import hf_hub_download
        >>> from transformers import AutoProcessor, AutoModelForCausalLM

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-vatex")
        >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-vatex")

        >>> # set seed for reproducibility
        >>> np.random.seed(45)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # load video
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample frames
        >>> num_frames = model.config.num_image_with_embedding
        >>> indices = sample_frame_indices(
        ...     clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames
        ... )
        >>> frames = read_video_pyav(container, indices)

        >>> pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values

        >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)

        >>> print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
        Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.']
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False

        outputs = self.git(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            pixel_values=pixel_values,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        logits = self.output(sequence_output)

        loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one,
            # and skip the image tokens that are prepended to the text sequence
            num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens
            shifted_logits = logits[:, num_image_tokens:-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss = self.loss_function(
                shifted_logits.view(-1, self.config.vocab_size),
                labels.view(-1),
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
    ):
        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            past_length = past_key_values.get_seq_length()

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to the old behavior: keep only the final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        # if the attention mask is missing, create it on the fly
        input_shape = input_ids.shape
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": kwargs.get("pixel_values", None),
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }

    def _reorder_cache(self, past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past


__all__ = ["GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel"]