"""PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object"""

import math
from dataclasses import dataclass
from typing import Callable, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...utils import ModelOutput, logging
from .configuration_idefics import IdeficsVisionConfig


logger = logging.get_logger(__name__)


@dataclass
class IdeficsVisionModelOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Nimage_embedslast_hidden_state.hidden_states
attentions)__name__
__module____qualname____doc__r   r   torchFloatTensor__annotations__r   r   r   r    r   r   Q/var/www/auris/lib/python3.10/site-packages/transformers/models/idefics/vision.pyr   &   s   
 r   c                       s\   e Zd Zdef fddZdejdededejfdd	Zddej	de
dejfddZ  ZS )IdeficsVisionEmbeddingsconfigc                    s   t    || _|j| _|j| _|j| _tt	
| j| _tj|j| j| j| jdd| _| j| j d | _| jd | _t| j| j| _| jdt	| jddd d S )NF)Zin_channelsZout_channelsZkernel_sizeZstrideZbias   r   position_ids)r   )
persistent)super__init__r    hidden_size	embed_dim
image_size
patch_sizer   	Parameterr   Zrandnclass_embeddingZConv2dnum_channelspatch_embeddingnum_patchesnum_positionsZ	Embeddingposition_embeddingZregister_bufferZarangeexpandselfr    	__class__r   r   r&   E   s"   
"z IdeficsVisionEmbeddings.__init__
embeddingsheightwidthreturnc                 C   s  |j d d }| | j}|j d d }||kr||kr|S |dddf }|ddddf }|j d }	|| jj }
|| jj }|
d |d }
}t|}|dt|t||	}|	dddd}|j
tjk}|rvtd |tj}tjj||
| || fd	d
d}|r|tj}t|
|j d kst||j d krtdt|
t|f d|j d |j d f d|	dddddd|	}tj|d|fddS )a#  
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images.

        Source:
        https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
        """
        num_patches = embeddings.shape[1] - 1
        pos_embed = self.position_embedding(self.position_ids)
        num_positions = pos_embed.shape[1] - 1
        if num_patches == num_positions and height == width:
            return pos_embed

        class_pos_embed = pos_embed[:, 0]
        patch_pos_embed = pos_embed[:, 1:]

        embed_dim = embeddings.shape[-1]
        num_h_patches = height // self.config.patch_size
        num_w_patches = width // self.config.patch_size
        # add a small offset to avoid floating point errors in the interpolation
        num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1

        sqrt_num_positions = math.sqrt(num_positions)
        patch_pos_embed = patch_pos_embed.reshape(1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        fp32_upcasting = patch_pos_embed.dtype == torch.bfloat16
        if fp32_upcasting:
            logger.warning_once(
                "Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in "
                "nn.functional.interpolate is not implemented for 'torch.bfloat16' dtype. This will result in a "
                "slight overhead."
            )
            patch_pos_embed = patch_pos_embed.to(torch.float)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            scale_factor=(num_h_patches / sqrt_num_positions, num_w_patches / sqrt_num_positions),
            mode="bicubic",
            align_corners=False,
        )
        if fp32_upcasting:
            patch_pos_embed = patch_pos_embed.to(torch.bfloat16)

        if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
            raise ValueError(
                f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the "
                f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})"
            )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, embed_dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding:
            if height != self.image_size or width != self.image_size:
                raise ValueError(
                    f"Input image size ({height}*{width}) doesn't match model"
                    f" ({self.image_size}*{self.image_size}). You should try to set `interpolate_pos_encoding=True`"
                )

        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, embed_dim, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)

        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
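

# An illustrative editor's check, not part of the original module: with no mask
# and zero dropout, `eager_attention_forward` computes the same scaled
# dot-product attention that `torch.nn.functional.scaled_dot_product_attention`
# fuses into a single kernel; only the output layout differs.
def _example_eager_matches_sdpa():
    query = torch.randn(2, 4, 5, 8)  # (batch, num_heads, seq_len, head_dim)
    key = torch.randn(2, 4, 5, 8)
    value = torch.randn(2, 4, 5, 8)

    attn_output, _ = eager_attention_forward(
        nn.Module().eval(), query, key, value, attention_mask=None, scaling=8**-0.5
    )
    reference = nn.functional.scaled_dot_product_attention(query, key, value)
    # eager returns (batch, seq_len, num_heads, head_dim); transpose to compare.
    print(torch.allclose(attn_output.transpose(1, 2), reference, atol=1e-5))  # True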
ee	 de
ejeej f f
ddZ  ZS )IdeficsVisionAttentionz=Multi-headed attention from 'Attention Is All You Need' paperr    c                    s   t    || _|j| _|j| _| j| j | _| j| j | jkr-td| j d| j d| jd | _	|j
| _d| _t| j| j| _t| j| j| _t| j| j| _t| j| j| _d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      F)r%   r&   r    r'   r(   Znum_attention_heads	num_headshead_dimrK   scaleZattention_dropoutra   	is_causalr   Lineark_projv_projq_projout_projr3   r5   r   r   r&      s$   

zIdeficsVisionAttention.__init__NFr   r_   causal_attention_maskoutput_attentionsr:   c              
   C   sL  |j \}}}| |}| |}	| |}
|||| j| jdd}|	||| j| jdd}	|
||| j| jdd}
| jj	dkrY|durR|durR|| }n|durX|}n|du| _
t}| jj	dkrz| jj	dkrt|rttd nt| jj	 }|| ||	|
|| j
| j| jsdn| jd	\}}|||| }| |}|sd}||fS )
z#Input shape: Batch x Time x Channelr   r!   Zflash_attention_2NeagerZsdpaz`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.rZ   )rn   r`   ra   )r@   rr   rp   rq   rL   rk   rl   rS   r    Z_attn_implementationrn   ri   rF   rG   r   rm   rc   ra   rC   re   rs   )r4   r   r_   rt   ru   rT   Z
seq_lengthr(   ZquerieskeysvaluesZattention_interfacerh   rg   r   r   r   rU      sH   	






zIdeficsVisionAttention.forward)NNF)r   r   r   r   r   r&   r   rW   r   rX   r   rU   rY   r   r   r5   r   rj      s"    rj   c                       s2   e Zd Z fddZdejdejfddZ  ZS )IdeficsVisionMLPc                    sD   t    || _t|j | _t|j|j	| _
t|j	|j| _d S N)r%   r&   r    r	   Z
hidden_actactivation_fnr   ro   r'   Zintermediate_sizefc1fc2r3   r5   r   r   r&     s
   
zIdeficsVisionMLP.__init__r   r:   c                 C   s"   |  |}| |}| |}|S rz   )r|   r{   r}   )r4   r   r   r   r   rU     s   


zIdeficsVisionMLP.forward)r   r   r   r&   r   rW   rU   rY   r   r   r5   r   ry     s    ry   c                       sT   e Zd Zdef fddZ	ddejdejdejdee d	e	ej
 f
d
dZ  ZS )IdeficsVisionEncoderLayerr    c                    sR   t    |j| _t|| _tj| j|jd| _	t
|| _tj| j|jd| _d S N)Zeps)r%   r&   r'   r(   rj   	self_attnr   	LayerNormlayer_norm_epslayer_norm1ry   mlplayer_norm2r3   r5   r   r   r&     s   


z"IdeficsVisionEncoderLayer.__init__Fr   r_   rt   ru   r:   c                 C   sd   |}|  |}| j||||d\}}|| }|}| |}| |}|| }|f}|r0||f7 }|S )aI  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
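

# An illustrative editor's sketch, not part of the original module: it runs one
# pre-norm transformer block on toy inputs. The namespace is a hypothetical
# stand-in for IdeficsVisionConfig carrying only the attributes the layer reads.
def _example_encoder_layer():
    from types import SimpleNamespace

    config = SimpleNamespace(
        hidden_size=64,
        num_attention_heads=4,
        attention_dropout=0.0,
        intermediate_size=128,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        _attn_implementation="eager",
    )
    layer = IdeficsVisionEncoderLayer(config)
    hidden_states = torch.randn(2, 17, 64)  # (batch, seq_len, embed_dim)
    output, attn_weights = layer(hidden_states, None, None, output_attentions=True)
    print(output.shape, attn_weights.shape)  # [2, 17, 64] and [2, 4, 17, 17]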
ee	 de
eef fddZ  ZS )IdeficsVisionEncoderz
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`IdeficsVisionEncoderLayer`].

    Args:
        config: IdeficsVisionConfig
    """

    def __init__(self, config: IdeficsVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
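

# An illustrative editor's sketch, not part of the original module: requesting
# all hidden states from a two-layer encoder returns num_hidden_layers + 1
# entries (the embedding output plus one per layer), as documented above. The
# namespace is a hypothetical stand-in for IdeficsVisionConfig.
def _example_encoder_hidden_states():
    from types import SimpleNamespace

    config = SimpleNamespace(
        hidden_size=64,
        num_attention_heads=4,
        attention_dropout=0.0,
        intermediate_size=128,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        num_hidden_layers=2,
        output_attentions=False,
        output_hidden_states=False,
        use_return_dict=True,
        _attn_implementation="eager",
    )
    encoder = IdeficsVisionEncoder(config)
    inputs_embeds = torch.randn(2, 17, 64)
    outputs = encoder(inputs_embeds, output_hidden_states=True)
    print(len(outputs.hidden_states))  # 3: embedding output + one state per layer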
ee de	e
ef fddZ  ZS )IdeficsVisionTransformerr    c                    sR   t    || _|j}t|| _tj||jd| _	t
|| _tj||jd| _d S r   )r%   r&   r    r'   r   r7   r   r   r   pre_layrnormr   encoderpost_layernorm)r4   r    r(   r5   r   r   r&     s   


z!IdeficsVisionTransformer.__init__NFrO   ru   r   rN   r   r:   c           
      C   s   |dur|n| j j}|dur|n| j j}|dur|n| j j}|du r&td| j||d}| |}| j||||d}|d }|dddddf }	| |	}	|s[||	f|dd  S t	||	|j
|jdS )z
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )