# coding=utf-8
"""PyTorch InstructBlipVideo model.

Skeleton recovered from a CPython 3.10 bytecode (.pyc) dump of
transformers/models/instructblipvideo/modeling_instructblipvideo.py: the import list,
class names, recoverable docstrings and the embedded usage example are kept; method
bodies that survived only as bytecode are omitted.
"""

import math
from dataclasses import dataclass
from typing import Any, Callable, Optional, Tuple, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...generation import GenerationMixin
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPooling,
    BaseModelOutputWithPoolingAndCrossAttentions,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import LossKwargs, ModelOutput, auto_docstring, can_return_tuple, logging, torch_int
from ..auto import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM
from .configuration_instructblipvideo import (
    InstructBlipVideoConfig,
    InstructBlipVideoQFormerConfig,
    InstructBlipVideoVisionConfig,
)

logger = logging.get_logger(__name__)


class InstructBlipVideoVisionEmbeddings(nn.Module):
    """Class-token and patch embeddings of the vision encoder.

    `interpolate_pos_encoding(embeddings, height, width)` interpolates the pre-trained position
    encodings so the model can be used on images of higher resolution. Adapted from:
    - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
    - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
    """


def eager_attention_forward(module, query, key, value, attention_mask, scaling, dropout=0.0, **kwargs):
    """Plain scaled dot-product attention, used when no optimized attention backend is selected."""


class InstructBlipVideoAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper."""


class InstructBlipVideoMLP(nn.Module):
    """Feed-forward block of the vision encoder: fc1 -> activation (`ACT2FN[config.hidden_act]`) -> fc2."""


class InstructBlipVideoEncoderLayer(nn.Module):
    """Pre-norm transformer block: layer_norm1 -> self_attn -> residual, then layer_norm2 -> mlp -> residual."""


class InstructBlipVideoEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is an
    [`InstructBlipVideoEncoderLayer`].

    Args:
        config (`InstructBlipVideoConfig`):
            The corresponding vision configuration for the `InstructBlipVideoEncoder`.
    """


class InstructBlipVideoQFormerMultiHeadAttention(nn.Module):
    """Q-Former self- or cross-attention with optional relative position embeddings
    (`position_embedding_type` in {"absolute", "relative_key", "relative_key_query"})."""


class InstructBlipVideoQFormerSelfOutput(nn.Module):
    """Dense projection, dropout and residual LayerNorm applied to the attention output."""


class InstructBlipVideoQFormerAttention(nn.Module):
    """Wraps the multi-head attention and its output projection; supports head pruning."""


class InstructBlipVideoQFormerIntermediate(nn.Module):
    """First feed-forward projection (hidden_size -> intermediate_size) of a Q-Former layer."""


class InstructBlipVideoQFormerOutput(nn.Module):
    """Second feed-forward projection (intermediate_size -> hidden_size) with dropout and residual LayerNorm."""


class InstructBlipVideoQFormerLayer(nn.Module):
    """Q-Former block: self-attention over the query tokens and instruction text, cross-attention to the
    image features every `config.cross_attention_frequency` layers, and separate feed-forward paths for
    the query and text parts."""


class InstructBlipVideoQFormerEncoder(nn.Module):
    """Stack of `config.num_hidden_layers` [`InstructBlipVideoQFormerLayer`] modules."""


class InstructBlipVideoQFormerEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs):
    ...


class InstructBlipVideoPreTrainedModel(PreTrainedModel):
    config_class = InstructBlipVideoConfig
    base_model_prefix = "blip"
    supports_gradient_checkpointing = True
    # Attention/cache capability flags and `_no_split_modules` are declared here; `_init_weights`
    # initializes Linear/Conv2d/Embedding/LayerNorm weights, the vision class and position
    # embeddings, and the learned query tokens.


class InstructBlipVideoVisionModel(InstructBlipVideoPreTrainedModel):
    main_input_name = "pixel_values"
    config_class = InstructBlipVideoVisionConfig


class InstructBlipVideoQFormerModel(InstructBlipVideoPreTrainedModel):
    """
    Querying Transformer (Q-Former), used in InstructBlipVideo. Slightly modified from BLIP-2 as it also takes the
    instruction as input.
    """


@dataclass
class InstructBlipVideoForConditionalGenerationModelOutput(ModelOutput):
    """
    Class defining the outputs of [`InstructBlipVideoForConditionalGeneration`].

    Args:
        loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Language modeling loss from the language model.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head of the language model.
        vision_outputs (`BaseModelOutputWithPooling`):
            Outputs of the vision encoder.
        qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
            Outputs of the Q-Former (Querying Transformer).
        language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
            Outputs of the language model.
    """

    loss: Optional[Tuple[torch.FloatTensor]] = None
    logits: Optional[Tuple[torch.FloatTensor]] = None
    vision_outputs: Optional[torch.FloatTensor] = None
    qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None
    language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None


class InstructBlipVideoModel(InstructBlipVideoPreTrainedModel):
    """InstructBlipVideo base model consisting of a language model, a Q-Former and a vision encoder."""

    main_input_name = "pixel_values"


class InstructBlipVideoForConditionalGeneration(InstructBlipVideoPreTrainedModel, GenerationMixin):
    """
    InstructBlipVideo Model for generating text given an image and an optional text prompt. The model consists of a
    vision encoder, Querying Transformer (Q-Former) and a language model.

    One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
    the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.

    Public methods include `forward`, `generate`, `get_image_features` and `get_video_features`, which encode
    images into continuous embeddings that can be forwarded to the language model.

    Example:

    ```python
    >>> from transformers import InstructBlipVideoProcessor, InstructBlipVideoForConditionalGeneration
    >>> import torch
    >>> from huggingface_hub import hf_hub_download
    >>> import av
    >>> import numpy as np

    >>> def read_video_pyav(container, indices):
    ...     '''
    ...     Decode the video with PyAV decoder.
    ...     Args:
    ...         container (`av.container.input.InputContainer`): PyAV container.
    ...         indices (`List[int]`): List of frame indices to decode.
    ...     Returns:
    ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
    ...     '''
    ...     frames = []
    ...     container.seek(0)
    ...     start_index = indices[0]
    ...     end_index = indices[-1]
    ...     for i, frame in enumerate(container.decode(video=0)):
    ...         if i > end_index:
    ...             break
    ...         if i >= start_index and i in indices:
    ...             frames.append(frame)
    ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])

    >>> model = InstructBlipVideoForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto")
    >>> processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")

    >>> file_path = hf_hub_download(
    ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
    ... )
    >>> container = av.open(file_path)

    >>> # sample uniformly 4 frames from the video
    >>> total_frames = container.streams.video[0].frames
    >>> indices = np.arange(0, total_frames, total_frames / 4).astype(int)
    >>> clip = read_video_pyav(container, indices)

    >>> prompt = "What is happening in the video?"
    >>> inputs = processor(text=prompt, images=clip, return_tensors="pt").to(model.device)

    >>> outputs = model.generate(
    ...     **inputs,
    ...     do_sample=False,
    ...     num_beams=5,
    ...     max_length=256,
    ...     repetition_penalty=1.5,
    ...     length_penalty=1.0,
    ... )
    >>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
    >>> print(generated_text)
    "A person is eating a bowl of pasta, and they are using a fork to eat it. The person is sitting at a table, and the plate of pasta is on the table in front"
    ```
    """

    main_input_name = "pixel_values"


__all__ = [
    "InstructBlipVideoVisionModel",
    "InstructBlipVideoPreTrainedModel",
    "InstructBlipVideoQFormerModel",
    "InstructBlipVideoModel",
    "InstructBlipVideoForConditionalGeneration",
]