"""PyTorch InstructBLIP model."""

import math
from dataclasses import dataclass
from typing import Any, Callable, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...generation import GenerationMixin
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPooling,
    BaseModelOutputWithPoolingAndCrossAttentions,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import LossKwargs, ModelOutput, auto_docstring, can_return_tuple, logging, torch_int
from ..auto import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM
from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig


logger = logging.get_logger(__name__)


@dataclass
class InstructBlipForConditionalGenerationModelOutput(ModelOutput):
    """
    Class defining the outputs of [`InstructBlipForConditionalGeneration`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss from the language model.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head of the language model.
        vision_outputs (`BaseModelOutputWithPooling`):
            Outputs of the vision encoder.
        qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
            Outputs of the Q-Former (Querying Transformer).
        language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
            Outputs of the language model.
    Nlosslogitsvision_outputsqformer_outputslanguage_model_outputsreturnc                    s   t  fdd  D S )Nc                 3   s.    | ]}|d vr | nt  | V  qdS )r(   r)   r*   N)getattrto_tuple).0kself e/var/www/auris/lib/python3.10/site-packages/transformers/models/instructblip/modeling_instructblip.py	<genexpr>G   s    
zKInstructBlipForConditionalGenerationModelOutput.to_tuple.<locals>.<genexpr>)tuplekeysr1   r3   r1   r4   r.   F   s   z8InstructBlipForConditionalGenerationModelOutput.to_tuple)__name__
__module____qualname____doc__r&   r   r   torchFloatTensor__annotations__r'   r(   r)   r*   r   r.   r3   r3   r3   r4   r%   -   s   
 r%   c                       s\   e Zd Zdef fddZdejdededejfdd	Zddej	de
dejfddZ  ZS )InstructBlipVisionEmbeddingsconfigc                    s   t    || _|j| _|j| _|j| _tt	
dd| j| _tjd| j| j| jd| _| j| j d | _| jd | _tt	
d| j| j| _d S )Nr!   r	   )Zin_channelsZout_channelsZkernel_sizeZstrider   )super__init__r@   hidden_size	embed_dimZ
image_size
patch_sizer   	Parameterr<   Zrandnclass_embeddingConv2dpatch_embeddingnum_patchesnum_positionsposition_embeddingr2   r@   	__class__r3   r4   rB   Q   s   
z%InstructBlipVisionEmbeddings.__init__
embeddingsheightwidthr+   c                 C   s   |j d d }| jj d d }tj s||kr||kr| jS | jddddf }| jddddf }|j d }|| j }	|| j }
t|d }|d|||}|dddd}t	j
j||	|
fdd	d
}|dddddd|}tj||fddS )a   
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embedding.shape[1] - 1

        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding

        class_pos_embed = self.position_embedding[:, :1]
        patch_pos_embed = self.position_embedding[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, embed_dim, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if interpolate_pos_encoding:
            position_embedding = self.interpolate_pos_encoding(embeddings, height, width)
        else:
            position_embedding = self.position_embedding
        embeddings = embeddings + position_embedding[:, : embeddings.size(1), :].to(target_dtype)
        return embeddings
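

# A quick shape sketch of `interpolate_pos_encoding` (hypothetical sizes, assuming the
# default InstructBLIP vision config of image_size=224 and patch_size=14):
#
#     pretrained grid:  224 // 14 = 16  ->  16 * 16 = 256 patch positions (+ 1 class token)
#     448 x 448 input:  448 // 14 = 32  ->  32 * 32 = 1024 patch positions (+ 1 class token)
#
# The 256 patch positions are reshaped to (1, 16, 16, dim), resized bicubically to
# (1, 32, 32, dim), flattened back to (1, 1024, dim), and the class-token position is
# re-attached, so the sum with `embeddings` in `forward` lines up again.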


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
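

# Sanity-check sketch (not executed by the model): the eager path above matches PyTorch's
# fused kernel up to the output layout -- `eager_attention_forward` returns
# (batch, seq, heads, head_dim), while SDPA returns (batch, heads, seq, head_dim).
#
#     q = k = v = torch.randn(1, 2, 4, 8)  # (batch, heads, seq, head_dim)
#     out, _ = eager_attention_forward(nn.Module(), q, k, v, None, scaling=8**-0.5)
#     ref = nn.functional.scaled_dot_product_attention(q, k, v)
#     torch.testing.assert_close(out, ref.transpose(1, 2))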


class InstructBlipAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.is_causal = False
        self.attention_dropout = config.attention_dropout

        # fused query/key/value projection, without bias
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)

        if config.qkv_bias:
            q_bias = nn.Parameter(torch.zeros(self.embed_dim))
            v_bias = nn.Parameter(torch.zeros(self.embed_dim))
        else:
            q_bias = None
            v_bias = None

        if q_bias is not None:
            # only the query and value projections carry a bias; the key bias stays zero
            qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
            self.qkv.bias = nn.Parameter(qkv_bias)

        self.projection = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        bsz, tgt_len, embed_dim = hidden_states.size()

        mixed_qkv = self.qkv(hidden_states)
        mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
            2, 0, 3, 1, 4
        )
        query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and output_attentions:
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            None,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scale,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
        output = self.projection(attn_output)

        outputs = (output, attn_weights) if output_attentions else (output, None)

        return outputs


class InstructBlipMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class InstructBlipEncoderLayer(nn.Module):
    def __init__(self, config: InstructBlipConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = InstructBlipAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = InstructBlipMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            head_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + residual

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = hidden_states + residual

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


@auto_docstring
class InstructBlipPreTrainedModel(PreTrainedModel):
    config_class = InstructBlipConfig
    base_model_prefix = "blip"
    supports_gradient_checkpointing = True

    _supports_attention_backend = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_static_cache = True
    _supports_quantized_cache = False

    _no_split_modules = [
        "InstructBlipQFormerEmbeddings",
        "InstructBlipAttention",
        "InstructBlipQFormerMultiHeadAttention",
        "InstructBlipQFormerSelfOutput",
    ]

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=factor)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=factor)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, InstructBlipVisionEmbeddings):
            nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
            nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
        elif isinstance(module, (InstructBlipForConditionalGeneration, InstructBlipModel)):
            module.query_tokens.data.zero_()


class InstructBlipEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InstructBlipEncoderLayer`].

    Args:
        config (`InstructBlipConfig`):
            The corresponding vision configuration for the `InstructBlipEncoder`.
    """

    def __init__(self, config: InstructBlipConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        """
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class InstructBlipVisionModel(InstructBlipPreTrainedModel):
    main_input_name = "pixel_values"
    config_class = InstructBlipVisionConfig

    def __init__(self, config: InstructBlipVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = InstructBlipVisionEmbeddings(config)
        self.encoder = InstructBlipEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        self.post_init()

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)

        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def get_input_embeddings(self):
        return self.embeddings


class InstructBlipQFormerMultiHeadAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
                % (config.hidden_size, config.num_attention_heads)
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
            self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.save_attention = False

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # If this is instantiated as a cross-attention module, the keys and values come from an
        # encoder; the attention mask needs to mask out the encoder's padding tokens.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        mixed_query_layer = self.query(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)

        past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        attention_scores_dtype = attention_scores.dtype

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the model's forward)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype)

        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)

        # This is actually dropping out entire tokens to attend to, which might seem a bit
        # unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask

        context_layer = torch.matmul(attention_probs_dropped, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        outputs = outputs + (past_key_value,)
        return outputs


class InstructBlipQFormerSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class InstructBlipQFormerAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.attention = InstructBlipQFormerMultiHeadAttention(config, is_cross_attention)
        self.output = InstructBlipQFormerSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class InstructBlipQFormerIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class InstructBlipQFormerOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class InstructBlipQFormerLayer(nn.Module):
    def __init__(self, config, layer_idx):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = InstructBlipQFormerAttention(config)

        self.layer_idx = layer_idx

        if layer_idx % config.cross_attention_frequency == 0:
            self.crossattention = InstructBlipQFormerAttention(config, is_cross_attention=True)
            self.has_cross_attention = True
        else:
            self.has_cross_attention = False

        self.intermediate = InstructBlipQFormerIntermediate(config)
        self.output = InstructBlipQFormerOutput(config)

        self.intermediate_query = InstructBlipQFormerIntermediate(config)
        self.output_query = InstructBlipQFormerOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        query_length=0,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:-1]

        present_key_value = self_attention_outputs[-1]

        if query_length > 0:
            query_attention_output = attention_output[:, :query_length, :]

            if self.has_cross_attention:
                if encoder_hidden_states is None:
                    raise ValueError("encoder_hidden_states must be given for cross-attention layers")
                cross_attention_outputs = self.crossattention(
                    query_attention_output,
                    attention_mask,
                    head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions=output_attentions,
                )
                query_attention_output = cross_attention_outputs[0]
                # add cross attentions if we output attention weights
                outputs = outputs + cross_attention_outputs[1:-1]

            layer_output = apply_chunking_to_forward(
                self.feed_forward_chunk_query,
                self.chunk_size_feed_forward,
                self.seq_len_dim,
                query_attention_output,
            )

            if attention_output.shape[1] > query_length:
                layer_output_text = apply_chunking_to_forward(
                    self.feed_forward_chunk,
                    self.chunk_size_feed_forward,
                    self.seq_len_dim,
                    attention_output[:, query_length:, :],
                )
                layer_output = torch.cat([layer_output, layer_output_text], dim=1)
        else:
            layer_output = apply_chunking_to_forward(
                self.feed_forward_chunk,
                self.chunk_size_feed_forward,
                self.seq_len_dim,
                attention_output,
            )
        outputs = (layer_output,) + outputs

        outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

    def feed_forward_chunk_query(self, attention_output):
        intermediate_output = self.intermediate_query(attention_output)
        layer_output = self.output_query(intermediate_output, attention_output)
        return layer_output


class InstructBlipQFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList(
            [InstructBlipQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        query_length=0,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions else None

        next_decoder_cache = () if use_cache else None

        for i in range(self.config.num_hidden_layers):
            layer_module = self.layer[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    query_length,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if layer_module.has_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class InstructBlipQFormerEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        self.config = config

    def forward(
        self,
        input_ids=None,
        position_ids=None,
        query_embeds=None,
        past_key_values_length=0,
    ):
        if input_ids is not None:
            seq_length = input_ids.size()[1]
        else:
            seq_length = 0

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone()

        if input_ids is not None:
            embeddings = self.word_embeddings(input_ids)
            if self.position_embedding_type == "absolute":
                position_embeddings = self.position_embeddings(position_ids.to(embeddings.device))
                embeddings = embeddings + position_embeddings

            if query_embeds is not None:
                embeddings = torch.cat((query_embeds, embeddings), dim=1)
        else:
            embeddings = query_embeds

        embeddings = embeddings.to(self.layernorm.weight.dtype)
        embeddings = self.layernorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
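

# Putting the Q-Former pieces together: every layer self-attends over the concatenation
# [query_tokens; instruction_tokens], but only the first `query_length` positions go through
# cross-attention to the image (on layers where `layer_idx % cross_attention_frequency == 0`)
# and through the dedicated `intermediate_query`/`output_query` MLP. A shape sketch, assuming
# the released checkpoints' 32 query tokens and a hypothetical 12-token instruction:
#
#     hidden_states : (batch, 32 + 12, hidden), with query_length = 32
#     query part    : hidden_states[:, :32]  -> cross-attention + query MLP
#     text part     : hidden_states[:, 32:]  -> text MLP only
#     layer output  : torch.cat([query_part, text_part], dim=1)  # (batch, 44, hidden)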
dd	 Zd
d Z	d!dejdee dejdedejf
ddZ											d"dejdeej deej deej deej deej deej deeeej   dee dee dee dee deeej ef fdd Z  ZS )#InstructBlipQFormerModelz
    Querying Transformer (Q-Former), used in InstructBLIP. Slightly modified from BLIP-2 as it also takes the
    instruction as input.
    """

    # the Q-Former uses its own BERT-style attention and does not route through the attention backends
    _supports_attention_backend = False
    _supports_flash_attn_2 = False
    _supports_sdpa = False
    _supports_flex_attn = False

    def __init__(self, config: InstructBlipQFormerConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InstructBlipQFormerEmbeddings(config)
        self.encoder = InstructBlipQFormerEncoder(config)

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        N)itemsr   r9  r  r!  )r2   Zheads_to_pruner9  r  r3   r3   r4   _prune_heads   s   z%InstructBlipQFormerModel._prune_headsru   input_shaper  	has_queryr+   c                 C   s   |  dkr|dddddddf }n|  dkr(|ddddddf }ntd| d|j d|j| jd}d| d	 }|S )
a>  
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (`Tuple[int]`):
                The shape of the input to the model.
            device (`torch.device`):
                The device of the input to the model.

        Returns:
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length] ourselves, in which case
        # we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length];
            # make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
            extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for masked
        # positions, this creates a tensor which is 0.0 where we attend and -10000.0 elsewhere.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        query_embeds: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        """
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:
            shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
            value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
            used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
            value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
            `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is None and query_embeds is None:
            raise ValueError("You have to specify query_embeds when input_ids is None")

        past_key_values_length = (
            past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
        )

        query_length = query_embeds.shape[1] if query_embeds is not None else 0

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            query_embeds=query_embeds,
            past_key_values_length=past_key_values_length,
        )

        input_shape = embedding_output.size()[:-1]
        batch_size, seq_length = input_shape
        device = embedding_output.device

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)

        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_hidden_states is not None:
            if isinstance(encoder_hidden_states, list):
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

            if isinstance(encoder_attention_mask, list):
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed: 1.0 in head_mask indicates we keep the head
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            query_length=query_length,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = sequence_output[:, 0, :]

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
rI  c                   @   s   e Zd ZdS )KwargsForCausalLMN)r8   r9   r:   r3   r3   r3   r4   rY    s    rY  z[
    InstructBLIP base Model consisting of language model, qformer and vision encoder.
    )Zcustom_introc                !       s   e Zd ZdZdgZdef fddZdd Zdd	 Zd
d Z	dd Z
ee										ddejdejdeej deej deej deej deej dee dee dee dedee dee deeef fddZ  ZS ) r   r`   r   r@   c                    s   t  | t|j| _ttd|j	|j
j| _t|j
| _t|j
j|jj| _t|j| _| jjd ur@| j| jj | jjd urN| j| jj |   d S Nr!   )rA   rB   r   vision_configvision_modelr   rF   r<   r   num_query_tokensqformer_configrC   r   rI  qformerr   text_configlanguage_projectionr   from_configlanguage_modelr   extend_keep_in_fp32_modulesr   rM   rN   r3   r4   rB     s   zInstructBlipModel.__init__c                 C   
   | j  S r   rc  r   r1   r3   r3   r4   r     r   z&InstructBlipModel.get_input_embeddingsc                 C      | j | d S r   rc  rL  rK  r3   r3   r4   rL       z&InstructBlipModel.set_input_embeddingsc                 C   ,   | j js| jj| jj_| jj| jj_d S d S r   r@   use_decoder_only_language_modelrc  Zsharedr   Zembed_tokensdecoderr1   r3   r3   r4   _tie_weights     zInstructBlipModel._tie_weightsc                 C   P   | j }t|dkrd|vrtj dkrtd t| jdr&d| jj	_
dS dS z
        Some pre-processing hacks to make the model `accelerate` compatible. Check
        https://github.com/huggingface/transformers/pull/21707 for more details.
        """
        hf_device_map = self.hf_device_map

        if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
            # warn users about unexpected behavior when using multi-GPU + InstructBLIP + `accelerate`
            logger.warning(
                "The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
                " in a multi-GPU environment. This may lead to unexpected behavior when using `accelerate`."
                " Please pass a `device_map` that contains `language_model` to remove this warning."
                " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for"
                " more details on creating a `device_map` for large models.",
            )

        if hasattr(self.language_model, "_hf_hook"):
            self.language_model._hf_hook.io_same_device = True  # For `generate` compatibility

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        qformer_input_ids: torch.FloatTensor,
        qformer_attention_mask: Optional[torch.LongTensor] = None,
        input_ids: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        use_cache: Optional[bool] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, InstructBlipForConditionalGenerationModelOutput]:
        r"""
        qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided
            to serve as text prompt, which the Q-Former model will encode.

            Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
            details.

            [What are input IDs?](../glossary#input-ids)
        qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
            provided to serve as text prompt, which the language model can continue.

            Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
            details.

            [What are input IDs?](../glossary#input-ids)
        decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.

            Only relevant in case an encoder-decoder language model (like T5) is used.
        N)r`   r   r   r   r_   r   rS   r  r!   rV   )rF  ru   rG  r	  r
  r   r   r   r   ru   r   r   r   r=  )r   ru   r|  r}  r   r   r   r=  r,   r3   )r@   r   r\  r<   rV  rT   r  r  r   rg   rX   	ones_liker^   r_  ra  rc  r   image_token_id	unsqueeze	expand_asre   rm  r%   )r2   r`   rz  r{  rF  ru   r|  r}  r   r   r   r_   r=  r}   r(   image_embedsimage_attention_maskr   query_attention_maskquery_outputsquery_outputlanguage_model_inputsr   special_image_maskr   r3   r3   r4   rj     sx   0  
$



	zInstructBlipModel.forward)
NNNNNNNNFN)r8   r9   r:   r   re  r"   rB   r   rL  ro  rx  r   r   r<   r=   r   rX  rn   r   r   r   r   r%   rj   ro   r3   r3   rN   r4   r     sb    	
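

# End-to-end flow of `InstructBlipModel.forward`, as a sketch (shapes assume the released
# ViT-g/14 checkpoints: 257 image tokens, 32 query tokens):
#
#     pixel_values (batch, 3, 224, 224)
#         -> vision_model         -> image_embeds           (batch, 257, vision_hidden)
#         -> qformer(query_tokens + instruction, cross-attending image_embeds)
#                                 -> query_output           (batch, 32, qformer_hidden)
#         -> language_projection  -> language_model_inputs  (batch, 32, text_hidden)
#         -> written into the positions of `input_ids` that hold `config.image_token_id`,
#            then the language model runs on the merged embeddings.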

@auto_docstring(
    custom_intro="""
    InstructBLIP Model for generating text given an image and an optional text prompt. The model consists of a vision
    encoder, Querying Transformer (Q-Former) and a language model.

    One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
    the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
    c                #       s  e Zd ZeZdZdZdZdZdgZ	def fddZ
dd	 Zd
d Zdd ZdejfddZdd Zdd Zdd Zdd Z			d-dejdejdeej dee dee f
ddZee											d.dejdejdeej d eej d!eej d"eej d#eej d$ee d%ee d&eej dee ded'ee d(ee dee e!f fd)d*Z"e# 					d/dejdeej deej d eej d!eej dedejfd+d,Z$  Z%S )0r   r`   TFr   r@   c                    s   t  | t|j| _tt	d|j
|jj| _t|j| _t|jj|jj| _|jr7t|j}nt|j}|jd urI| j|j |jd urU| j|j || _|   d S rZ  )rA   rB   r   Z_from_configr[  r\  r   rF   r<   r   r]  r^  rC   r   rI  r_  r   r`  ra  rm  r   rb  r    r   rd  re  rc  r   )r2   r@   rc  rN   r3   r4   rB   v  s   

z-InstructBlipForConditionalGeneration.__init__c                 C   rf  r   rg  r1   r3   r3   r4   r     r   z9InstructBlipForConditionalGeneration.get_input_embeddingsc                 C   rh  r   ri  rK  r3   r3   r4   rL    rj  z9InstructBlipForConditionalGeneration.set_input_embeddingsc                 C   rh  r   )rc  set_output_embeddings)r2   Znew_embeddingsr3   r3   r4   r    rj  z:InstructBlipForConditionalGeneration.set_output_embeddingsr+   c                 C   rf  r   )rc  get_output_embeddingsr1   r3   r3   r4   r    r   z:InstructBlipForConditionalGeneration.get_output_embeddingsc                 C   rf  r   )rc  get_encoderr1   r3   r3   r4   r    r   z0InstructBlipForConditionalGeneration.get_encoderc                 C   rf  r   )rc  get_decoderr1   r3   r3   r4   r    r   z0InstructBlipForConditionalGeneration.get_decoderc                 C   rk  r   rl  r1   r3   r3   r4   ro    rp  z1InstructBlipForConditionalGeneration._tie_weightsc                 C   rq  rr  rt  rw  r3   r3   r4   rx    ry  z;InstructBlipForConditionalGeneration._preprocess_accelerateNrz  r{  r_   r   c                 C   s   | j ||dd}|d }tj| dd tj|jd}| j|jd dd}	tj|	 dd tj|jd}
|du r@t	|}tj
|
|gdd}| j|||	||dd	}|d ddd|	dddf }| |}|rr|||fS |S )
a$  
        Encodes images into continuous embeddings that can be forwarded to the language model.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
                The tensors corresponding to the input images.
        T)r`   r_   r   r   NrS   r  r!   rV   )rF  ru   rG  r	  r
  r   )r\  r<   rV  rT   r  r  r   rg   rX   r  r^   r_  ra  )r2   r`   rz  r{  r_   r   r(   r  r  r   r  r  r  r  r3   r3   r4   get_image_features  s2     
$

z7InstructBlipForConditionalGeneration.get_image_featuresrF  ru   r|  r}  r   r   labelsr=  r}   c                 K   s  |dur|n| j j}| j||||dd\}}}|s| n|}|s%| n|}tj| dd tj|jd}| j	
 |}|du rGt|}t| j dddurc|| j jkd|}| ||< ntd tj|||jgdd	}tj|||jgdd	}| j jr| j	d||||	||d
|}|r|jn|d }d}|
dur| jd||
| j jjd|}n$| j	d||||||	||
|d	|}|r|jn|d }|r|jn|d }t|||||dS )a+  
        qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided
            to serve as text prompt, which the Q-Former model will encode.

            Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
            details.

            [What are input IDs?](../glossary#input-ids)
        qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
            provided to serve as text prompt, which the language model can continue.

            Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
            details.

            [What are input IDs?](../glossary#input-ids)
        decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.

            Only relevant in case an encoder-decoder language model (like T5) is used.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size -
            1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size]`

        Examples:

        ```python
        >>> from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b")
        >>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")

        >>> device = "cuda" if torch.cuda.is_available() else "cpu"
        >>> model.to(device)  # doctest: +IGNORE_RESULT

        >>> url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
        >>> prompt = "What is unusual about this image?"
        >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)

        >>> outputs = model.generate(
        ...     **inputs,
        ...     do_sample=False,
        ...     num_beams=5,
        ...     max_length=256,
        ...     min_length=1,
        ...     top_p=0.9,
        ...     repetition_penalty=1.5,
        ...     length_penalty=1.0,
        ...     temperature=1,
        ... )
        >>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
        >>> print(generated_text)
        The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV, which is parked in the middle of a busy city street. This is an unconventional approach to ironing clothes, as it requires the man to balance himself and his ironing equipment on top of the vehicle while navigating through traffic. Additionally, the presence of taxis and other vehicles in the scene further emphasizes the unusual nature of this situation.
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        language_model_inputs, vision_outputs, query_outputs = self.get_image_features(
            pixel_values,
            qformer_input_ids=qformer_input_ids,
            qformer_attention_mask=qformer_attention_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=True,
        )
        vision_outputs = vision_outputs.to_tuple() if not return_dict else vision_outputs
        query_outputs = query_outputs.to_tuple() if not return_dict else query_outputs

        language_model_attention_mask = torch.ones(
            language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
        )

        inputs_embeds = self.get_input_embeddings()(input_ids)
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)

        if getattr(self.config, "image_token_id", None) is not None:
            special_image_mask = (input_ids == self.config.image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
            inputs_embeds[special_image_mask] = language_model_inputs.flatten().to(inputs_embeds.dtype)
        else:
            logger.warning_once(
                "Expanding inputs for image tokens in InstructBLIP should be done in processing. "
                "Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) "
                "to update your InstructBLIP model. Using processors without these attributes in the config is "
                "deprecated and will throw an error in v4.50."
            )
            inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
            attention_mask = torch.cat(
                [language_model_attention_mask, attention_mask.to(language_model_attention_mask.device)], dim=1
            )

        if self.config.use_decoder_only_language_model:
            outputs = self.language_model(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                use_cache=use_cache,
                **kwargs,
            )
            logits = outputs.logits if return_dict else outputs[0]
            loss = None
            if labels is not None:
                loss = self.loss_function(
                    logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
                )
        else:
            outputs = self.language_model(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                labels=labels,
                use_cache=use_cache,
                **kwargs,
            )
            loss = outputs.loss if return_dict else outputs[0]
            logits = outputs.logits if return_dict else outputs[1]

        return InstructBlipForConditionalGenerationModelOutput(
            loss=loss,
            logits=logits,
            vision_outputs=vision_outputs,
            qformer_outputs=query_outputs,
            language_model_outputs=outputs,
        )

    @torch.no_grad()
    def generate(
        self,
        pixel_values: torch.FloatTensor,
        qformer_input_ids: Optional[torch.LongTensor] = None,
        qformer_attention_mask: Optional[torch.LongTensor] = None,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        interpolate_pos_encoding: bool = False,
        **generate_kwargs,
    ) -> torch.LongTensor:
        """
        Overrides `generate` function to be able to use the model as a conditional generator.

        Args:
            pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
                Input images to be processed.
            qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                The sequence used as a prompt to be fed to the Q-Former module.
            qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                Mask to avoid performing attention on padding token indices.
            input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                The sequence used as a prompt for the generation.
            attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                Mask to avoid performing attention on padding token indices.
            interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
                Whether to interpolate the positional encoding of the image embeddings.

        Returns:
            captions (list): A list of strings of length batch_size * num_captions.
        ru  r   Tr  NrS   r  r  r!   r  rV   
max_length   Z
min_length)r   ru   rF  r3   )r   rx  rX   r  r<   rV  rT   r  r  r@   r`  Zbos_token_idr-   r  r]  r   repeatr  r   r  r  re   rd   r   r   r^   rc  Zis_encoder_decodergetgenerate)r2   r`   rz  r{  rF  ru   r_   Zgenerate_kwargsrh   r  r(   r  Zlanguage_attention_maskZstart_tokensr   r  Zinputsr   r3   r3   r4   r    sR   





z-InstructBlipForConditionalGeneration.generate)NFF)NNNNNNNNNFNr   )&r8   r9   r:   r"   r   r   r   r   r   re  rB   r   rL  r  r   Moduler  r  r  ro  rx  r<   r=   rX  r   rn   r  r   r   r   rY  r   r   r%   rj   Zno_gradr  ro   r3   r3   rN   r4   r   e  s    

1	

 "	r   )rI  r   r   r   r   )rp   )Lr;   r  dataclassesr   typingr   r   r   r   r   r<   Ztorch.utils.checkpointr   Zactivationsr
   Z
generationr   Zmodeling_flash_attention_utilsr   Zmodeling_outputsr   r   r   r   Zmodeling_utilsr   r   Zprocessing_utilsr   Zpytorch_utilsr   r   r   utilsr   r   r   r   r   r   autor   r   r    Zconfiguration_instructblipr"   r#   r$   Z
get_loggerr8   r   r%   r  r?   rl   floatr   r   r   r   r   r   r   r   r   r  r#  r&  r'  r7  r   rI  rY  r   r   __all__r3   r3   r3   r4   <module>   s    
!R
U0(W> 2h_3 J 0	   