o
    Zh^                    @   s  d Z ddlZddlZddlmZ ddlmZmZmZm	Z	m
Z
 ddlZddlZddlmZ ddlmZ ddlmZmZ dd	lmZmZ dd
lmZmZ ddlmZmZmZmZ ddlmZ ddl m!Z!m"Z" ddl#m$Z$m%Z%m&Z& ddl'm(Z(m)Z)m*Z* e&+e,Z-dej.dej.fddZ/dej.dej.fddZ0dd Z1dHddZ2						dIddZ3eG d d! d!e$Z4eG d"d# d#e$Z5G d$d% d%ej6Z7G d&d' d'ej6Z8G d(d) d)ej6Z9G d*d+ d+ej6Z:G d,d- d-ej6Z;G d.d/ d/ej6Z<G d0d1 d1ej6Z=G d2d3 d3ej6Z>G d4d5 d5ej6Z?G d6d7 d7ej6Z@e%G d8d9 d9eZAG d:d; d;eAZBG d<d= d=eAZCe%G d>d? d?eAZDe%d@dAG dBdC dCeAeZEe%dDdAG dEdF dFeAeZFg dGZGdS )JzPyTorch CLVP model.    N)	dataclass)CallableDictOptionalTupleUnion)nn)CrossEntropyLoss   )ACT2FNget_activation)GenerationConfigGenerationMixin)_prepare_4d_attention_mask!_prepare_4d_causal_attention_mask)BaseModelOutput)BaseModelOutputWithPastAndCrossAttentionsBaseModelOutputWithPooling!CausalLMOutputWithCrossAttentions)PreTrainedModel)Conv1Disin_mps_friendly)ModelOutputauto_docstringlogging   )
ClvpConfigClvpDecoderConfigClvpEncoderConfiglogitsreturnc                 C   s   t j| tjt| | jdS )Ndevice)r   
functionalZcross_entropytorcharangelenr"   )r    r'   U/var/www/auris/lib/python3.10/site-packages/transformers/models/clvp/modeling_clvp.pycontrastive_loss7   s   r)   
similarityc                 C   s    t | }t |  }|| d S )Ng       @)r)   t)r*   Zcaption_lossZspeech_lossr'   r'   r(   	clvp_loss<   s   r,   c                 C   sH   | dd| j d d f }| d| j d d df }tj| |fddS )z*Rotates half the hidden dims of the input..N   dim)shaper$   cat)xx1Zx2r'   r'   r(   rotate_halfC   s   r5   c           
      C   sb   ||  |}||  |}| | t| |  }|| t||  }|| t||  }	|||	fS )an  Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offsetted position ids when working with a KV-cache.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    )	unsqueezer5   )
qkvcossinposition_idsZunsqueeze_dimZq_embedZk_embedZv_embedr'   r'   r(   apply_rotary_pos_embJ   s   
r=      Tc                 C   s  |rt jjj| d|d} |durt jjj|dddn|}| }|rt j| jd | jd d f| j| jd}t| D ]<\}}	t	|	|
 rht |	|kd  }
t |	d|
 t j|g| jd|	|
d g||< q8t jjj|	d|d||< q8|durt jjj|dddn|}||fS )	z
    This method adds extra bos and eos tokens to input_ids and accordingly modifies the attention_mask which is used in
    `ClvpConditioningEncoder` and the generation loop of the `ClvpModelForConditionalGeneration`.
    )r   r   valueNr   r   dtyper"   r!   )r   r   )r$   r   r#   padZzerosr1   rB   r"   	enumerater   sumwhereminZconcatenatetensor)	input_idsattention_maskZpad_token_idbos_token_ideos_token_idadd_bos_tokenZadd_eos_tokenZmodified_input_idsiZeach_input_idposr'   r'   r(   _pad_extra_bos_eos_tokensg   s&   &
rP   c                   @   st   e Zd ZU dZdZeej ed< dZ	eej ed< dZ
eej ed< dZeeej  ed< dZeeej  ed< dS )ClvpEncoderOutputak  
    Base class for CLVP encoder's outputs that contains a pooling of the last hidden states as well as a projection
    output (a linear layer on top of the pooled output).

    Args:
        embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            The hidden state of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Pooled output of the `last_hidden_state`.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
            the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    Nembedslast_hidden_statepooler_outputhidden_states
attentions)__name__
__module____qualname____doc__rR   r   r$   FloatTensor__annotations__rS   rT   rU   r   rV   r'   r'   r'   r(   rQ      s   
 rQ   c                   @   s   e Zd ZU dZdZeej ed< dZ	eej
 ed< dZeej ed< dZeej ed< dZeej ed< dZeej ed< dZeed	< dZeed
< dZeej ed< dZeej ed< dZeej ed< dS )
ClvpOutputai  
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for speech-text similarity.
        speech_ids (`torch.LongTensor`, *optional*):
            speech_ids (or speech candidates) generated by the `ClvpForCausalLM` model.
        logits_per_speech (`torch.FloatTensor` of shape `(speech_batch_size, text_batch_size)`):
            The scaled dot product scores between `speech_embeds` and `text_embeds`. This represents the speech-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, speech_batch_size)`):
            The scaled dot product scores between `text_embeds` and `speech_embeds`. This represents the text-speech
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
            The text embeddings obtained by applying the projection layer to the pooled output of the text encoder
            model.
        speech_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
            The speech embeddings obtained by applying the projection layer to the pooled output of the speech encoder
            model.
        text_model_output (`BaseModelOutputWithPooling`):
            The pooled output of the `last_hidden_state` of the text encoder Model.
        speech_model_output (`BaseModelOutputWithPooling`):
            The pooled output of the `last_hidden_state` of the speech encoder Model.
        decoder_hidden_states (`torch.FloatTensor`, *optional*):
            The hidden states of the decoder model.
        text_encoder_hidden_states (`torch.FloatTensor`, *optional*):
            The hidden states of the text encoder model.
        speech_encoder_hidden_states (`torch.FloatTensor`, *optional*):
            The hidden states of the speech encoder model.
    Nloss
speech_idslogits_per_speechlogits_per_texttext_embedsspeech_embedstext_model_outputspeech_model_outputdecoder_hidden_statestext_encoder_hidden_statesspeech_encoder_hidden_states)rW   rX   rY   rZ   r^   r   r$   r[   r\   r_   
LongTensorr`   ra   rb   rc   rd   r   re   rf   rg   rh   r'   r'   r'   r(   r]      s   
 r]   c                       s.   e Zd Zd fdd	Zdd Zdd Z  ZS )	ClvpRMSNormư>c                    s&   t    tt|| _|| _dS )z:
        ClvpRMSNorm is equivalent to T5LayerNorm
        N)super__init__r   	Parameterr$   onesweightvariance_epsilon)selfhidden_sizeeps	__class__r'   r(   rm      s   

zClvpRMSNorm.__init__c                 C   sJ   |j }|tj}|djddd}|t|| j  }| j|| S )Nr.   r-   T)keepdim)	rB   tor$   Zfloat32powmeanZrsqrtrq   rp   )rr   rU   Zinput_dtypeZvariancer'   r'   r(   forward   s
   zClvpRMSNorm.forwardc                 C   s   t | jj d| j S )Nz, eps=)tuplerp   r1   rq   rr   r'   r'   r(   
extra_repr   s   zClvpRMSNorm.extra_repr)rk   )rW   rX   rY   rm   r{   r~   __classcell__r'   r'   ru   r(   rj      s    rj   c                       6   e Zd ZdZ fddZdejdejfddZ  ZS )ClvpRotaryPositionalEmbeddingz
    Rotary Position Embedding Class for CLVP. It was proposed in the paper 'ROFORMER: ENHANCED TRANSFORMER WITH ROTARY
    POSITION EMBEDDING', Please see https://arxiv.org/pdf/2104.09864v1.pdf .
    c                    s`   t    t|j|jd  d}ddtjd|dtjd |   }| 	d| d | _
d | _d S )Nr.             ?i'  r   rB   inv_freq)rl   rm   maxprojection_dimnum_attention_headsr$   r%   Zint64floatregister_buffercached_sequence_length"cached_rotary_positional_embedding)rr   configr0   r   ru   r'   r(   rm      s   
$
z&ClvpRotaryPositionalEmbedding.__init__rU   r    c                 C   sv   |j d }|| jkr| jd ur| jS || _tj||jd| j}td|| j}tj	||fdd}|
d| _| jS )Nr   r!   zi,j->ijr-   r/   r   )r1   r   r   r$   r%   r"   Ztype_asr   Zeinsumr2   r6   )rr   rU   sequence_lengthZtime_stampsZfreqsZ
embeddingsr'   r'   r(   r{     s   
z%ClvpRotaryPositionalEmbedding.forward	rW   rX   rY   rZ   rm   r$   r[   r{   r   r'   r'   ru   r(   r      s    	r   c                       s   e Zd ZdZ fddZdejdedefddZ									
			
ddej	de
ej	 de
ej de
ej de
eej  de
e de
ej	 de
e deej	e
ej	 e
eej	  f fddZ  ZS )ClvpSelfAttentionzu
    Multi-headed attention to combine Absolute and Rotary Positional Embeddings into a single Attention module.
    c                    s  t    || _|j| _|j| _| j| j | _| j| j | jkr-td| j d| j d| jd | _	|j
| _t|dr\|j}ttj||ftjd}|dd||}| jd|d	d
 tj| j| j|jd| _tj| j| j|jd| _tj| j| j|jd| _t| j| j| _d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).      max_position_embeddingsr   r   biasF)
persistentr   )rl   rm   r   rs   	embed_dimr   	num_headshead_dim
ValueErrorscaleZattention_dropoutdropouthasattrr   r$   Ztrilro   boolviewr   r   LinearZuse_attention_biask_projv_projq_projout_proj)rr   r   Zmax_positionsr   ru   r'   r(   rm     s,   


zClvpSelfAttention.__init__rH   seq_lenbszc                 C   s    | ||| j| jdd S )Nr   r.   )r   r   r   	transpose
contiguous)rr   rH   r   r   r'   r'   r(   _shape.  s    zClvpSelfAttention._shapeNFrU   rotary_pos_embrJ   r<   past_key_value	use_cache	head_maskoutput_attentionsr    c	                  C   s  |d ur|d u rt d| \}	}
}| | |d|	| j }| | |d|	}| | |d|	}|d urN|\}}tj||fdd}tj||fdd}|du rW||f}nd }|d ur|j	d }|dd |f |d|d f }}|dd |f |d|d f }}|dd |f |d|d f }}|
 d| d}}t||||||\}}}tj||fdd}tj||fdd}tj||fdd}|j	d }|j	d }t||dd	}|d ur| |	d
||fkrt d|	d
||f d|  || }tjj|dd}|d ur|| }tjj|| j| jd}t||}| |	| j|| jfkrCt d|	| j|| jf d|  |d
d }||	|| j}| |}|s]d }|||fS )NzB`position_ids` must be provided when `rotary_pos_emb` is not None.r-   r/   T.r   r.   r
   r   z!Attention mask should be of size z	, but is )ptrainingz `attn_output` should be of size )r   sizer   r   r   r   r   r$   r2   r1   r:   squeezer;   r=   matmulr   r   r#   Zsoftmaxr   r   r   r   r   Zreshaper   r   ) rr   rU   r   rJ   r<   r   r   r   r   r   _r   Zquery_statesZ
key_statesZvalue_statesZpast_keyZ
past_valueZpresentZrotary_emb_dimZ	query_rotZ
query_passZkey_rotZkey_passZ	value_rotZ
value_passr:   r;   Ztgt_lenZsrc_lenZattn_weightsZ
attn_probsattn_outputr'   r'   r(   r{   1  sn   







zClvpSelfAttention.forward)NNNNFNF)rW   rX   rY   rZ   rm   r$   Tensorintr   r[   r   ri   r   r   r{   r   r'   r'   ru   r(   r     s<    	
r   c                       r   )ClvpGatedLinearUnitz
    `ClvpGatedLinearUnit` uses the second half of the `hidden_states` to act as a gate for the first half of the
    `hidden_states` which controls the flow of data from the first of the tensor.
    c                    s0   t    t|j | _t|j|jd | _	d S )Nr.   )
rl   rm   r   Z
hidden_actactivation_fnr   r   rs   intermediate_sizeprojrr   r   ru   r'   r(   rm     s   
zClvpGatedLinearUnit.__init__rU   r    c                 C   s&   |  |jddd\}}|| | S )Nr.   r-   r/   )r   chunkr   )rr   rU   Zgater'   r'   r(   r{     s   zClvpGatedLinearUnit.forwardr   r'   r'   ru   r(   r     s    r   c                       r   )ClvpEncoderMLPzA
    This MLP is used in CLVP speech or text encoder models.
    c                    s>   t    || _t|| _t|j|j| _	t
|j| _d S N)rl   rm   r   r   fc1r   r   r   rs   fc2Dropoutr   dropout_layerr   ru   r'   r(   rm     s
   

zClvpEncoderMLP.__init__rU   r    c                 C   s"   |  |}| |}| |}|S r   )r   r   r   rr   rU   r'   r'   r(   r{     s   


zClvpEncoderMLP.forwardr   r'   r'   ru   r(   r     s    r   c                       sZ   e Zd Zdef fddZ	ddejdejdejdejd	ee	 d
e
ej fddZ  ZS )ClvpEncoderLayerr   c                    sT   t    || _|j| _t|| _t|| _t	| j|j
d| _t	| j|j
d| _d S )Nrt   )rl   rm   r   rs   r   r   	self_attnr   mlprj   layer_norm_epsinput_rmsnormpost_attention_rmsnormr   ru   r'   r(   rm     s   


zClvpEncoderLayer.__init__FrU   r   rJ   r<   r   r    c           	      C   sn   |}|  |}| j|||||d}|d }|| }|}| |}| |}|| }|f}|r5||d f7 }|S )a6  
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):
                input to the layer.
            rotary_pos_emb (`torch.FloatTensor`):
                rotary position embeddings generated by `ClvpRotaryPositionalEmbedding` module.
            attention_mask (`torch.FloatTensor` of shape `(batch, 1, tgt_len, src_len)`):
                attention mask where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor`):
                Denotes position ids of the input tokens.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        )rU   r   rJ   r<   r   r   r-   )r   r   r   r   )	rr   rU   r   rJ   r<   r   residualZattention_outputsoutputsr'   r'   r(   r{     s&   


zClvpEncoderLayer.forward)F)rW   rX   rY   r   rm   r$   r[   ri   r   r   r   r{   r   r'   r'   ru   r(   r     s     r   c                       sJ   e Zd ZdZdef fddZ	ddejdeej	 dejfd	d
Z
  ZS )ClvpSequenceSummarya  
    Compute a single vector summary of a sequence hidden states.

    Args:
        config ([`ClvpConfig`]):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):

            - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:

                - `"last"` -- Take the last token hidden state (like XLNet)
                - `"first"` -- Take the first token hidden state (like Bert)
                - `"mean"` -- Take the mean of all tokens hidden states
                - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - `"attn"` -- Not implemented now, use multi-head attention

            - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
              (otherwise to `config.hidden_size`).
            - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
              another string or `None` will add no activation.
            - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
            - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.
    r   c                    s   t    t|dd| _| jdkrtt | _t|dr<|j	r<t|dr1|j
r1|jdkr1|j}n|j}t|j|| _t|dd }|rHt|nt | _t | _t|drc|jdkrct|j| _t | _t|d	r{|jdkr}t|j| _d S d S d S )
Nsummary_typelastattnsummary_use_projsummary_proj_to_labelsr   Zsummary_activationsummary_first_dropoutsummary_last_dropout)rl   rm   getattrr   NotImplementedErrorr   ZIdentitysummaryr   r   r   Z
num_labelsrs   r   r   
activationfirst_dropoutr   r   last_dropoutr   )rr   r   Znum_classesZactivation_stringru   r'   r(   rm     s&   




zClvpSequenceSummary.__init__NrU   	cls_indexr    c                 C   s  | j dkr|dddf }ne| j dkr|dddf }nW| j dkr(|jdd}nK| j d	krl|du rItj|d
ddddf |jd d tjd}n|dd}|d| d  |	df }|
d|d}n| j dkrst| |}| |}| |}| |}|S )ak  
        Compute a single vector summary of a sequence hidden states.

        Args:
            hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`):
                The hidden states of the last layer.
            cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
                Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token.

        Returns:
            `torch.FloatTensor`: The summary of the sequence hidden states.
        r   Nr-   firstr   rz   r   r/   r   .r   r   r-   r   )r   rz   r$   Z	full_liker1   longr6   expandr0   r   gatherr   r   r   r   r   r   )rr   rU   r   outputr'   r'   r(   r{   *  s.   



"




zClvpSequenceSummary.forwardr   )rW   rX   rY   rZ   r   rm   r$   r[   r   ri   r{   r   r'   r'   ru   r(   r     s    r   c                       s:   e Zd Z fddZdeeej  dejfddZ  Z	S )ClvpDecoderMLPc                    sF   t    |j}t||| _t||| _t|j | _t	
|j| _d S r   )rl   rm   rs   r   c_fcc_projr   Zactivation_functionactr   r   Zresid_pdropr   )rr   r   r   r   ru   r'   r(   rm   X  s   
zClvpDecoderMLP.__init__rU   r    c                 C   s,   |  |}| |}| |}| |}|S r   )r   r   r   r   r   r'   r'   r(   r{   `  s
   



zClvpDecoderMLP.forward)
rW   rX   rY   rm   r   r   r$   r[   r{   r   r'   r'   ru   r(   r   W  s    &r   c                       s   e Zd Z fddZ						ddeeej  deeej  deej	 deej	 d	eej d
ee
 dee
 deeej eeejeejdf f  f fddZ  ZS )ClvpDecoderLayerc                    sf   t    |j}|jd ur|jnd| }tj||jd| _t|| _	tj||jd| _
t||| _d S )N   r   )rl   rm   rs   Zn_innerr   	LayerNormlayer_norm_epsiloninput_layernormr   r   post_attention_layernormr   r   )rr   r   rs   Z	inner_dimru   r'   r(   rm   i  s   

zClvpDecoderLayer.__init__NFrU   r   rJ   r<   r   r   r   r    .c              	   C   s   |}|  |}| j|||||||d}	|	d }
|	dd  }|
| }|}| |}| |}|| }|r:|f| }|S |f|dd   }|S )Nr   rJ   r<   r   r   r   r   r   )r   r   r   r   )rr   rU   r   rJ   r<   r   r   r   r   Zattn_outputsr   r   Zfeed_forward_hidden_statesr'   r'   r(   r{   t  s.   

	


zClvpDecoderLayer.forward)NNNNFF)rW   rX   rY   rm   r   r   r$   r[   r   ri   r   r   r{   r   r'   r'   ru   r(   r   h  s2    (	r   c                
       sp   e Zd ZdZdef fddZddedefdd	Z	
	
	
ddej	de
ej de
ej	 de
ej fddZ  ZS )ClvpConditioningEncodera  
    This class processes the log-mel spectrograms(extracted by the Feature Extractor) and text tokens(produced by the
    tokenizer) as inputs for the decoder model.

    First each log-mel spectrogram is processed into a single vector which captures valuable characteristics from each
    of them, then the text tokens are converted into token embeddings and position embeddings are added afterwards.
    Both of these vectors are concatenated and then passed to the decoder model.

    The text tokens helps to incorporate the "text information" and the log-mel spectrogram is used to specify the
    "voice characteristics" into the generated mel tokens.
    r   c                    s   t    |j_|j_tjjjj_tjj	jj_
tjjjjjdd_jj t fddtjjD _tfddtjjD _d_d S )Nr   )Zkernel_sizec                    s"   g | ]}t j jjd ddqS )gh㈵>T)rt   Zaffine)r   Z	GroupNormdecoder_configrs   .0r   Z
num_groupsrr   r'   r(   
<listcomp>  s    z4ClvpConditioningEncoder.__init__.<locals>.<listcomp>c                       g | ]}t  jqS r'   )r   r   r   r}   r'   r(   r         F)rl   rm   text_configr   r   	Embedding
vocab_sizers   text_token_embeddingmax_text_tokenstext_position_embeddingConv1dZfeature_sizemel_convcompute_groupnorm_groups
ModuleListrangeZnum_mel_attn_blocksgroup_normsmel_attn_blocksgradient_checkpointingr   ru   r   r(   rm     s$   


z ClvpConditioningEncoder.__init__r   channelsgroupsc                 C   sZ   |dkrd}n|dkrd}|| dkrt |d }|| dks|dkr+td| d|S )a  
        Calculates the value of `num_groups` for nn.GroupNorm. This logic is taken from the official tortoise
        repository. link :
        https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/models/arch_util.py#L26
              @   r   r.   zENumber of groups for the GroupNorm must be greater than 2, but it is z0.Please consider using a different `hidden_size`)r   r   )rr   r  r  r'   r'   r(   r     s   
z0ClvpConditioningEncoder.compute_groupnorm_groupsNinput_featuresrI   inputs_embedsrJ   c                 C   sj  |d ur|d urt d|d ur| \}}n|d ur&| d d \}}nt d|d u r:tj||gtj|jd}t||| jj| jj	d\}}| 
|}|dd }| |}|| }	| jr| jrtjj| j|}
t| jD ]+\}}|
dd}tjj| j| |
dd}
tjj||
d | }
|
dd}
qqn.| |}
t| jD ]#\}}|
dd}| j| |
dd}
||
d | }
|
dd}
q|
d d d d df }
|
d}
|	jd dkr|
jd dkr|	|
jd dd}	n7|	jd dkr|
jd dkr|
|	jd dd}
n|	jd |
jd kr,t d	|	jd  d
|
jd  dtj|
|	gddS )NDYou cannot specify both input_ids and inputs_embeds at the same timer-   5You have to specify either input_ids or inputs_embedsrA   )rK   rL   r   r.   r   z=The number of texts and number of audios must be same. Found z
 texts vs z audiosr/   )r   r   r$   ro   r   r"   rP   r   rK   rL   r   cumsumr   r  r   utils
checkpointr   rD   r  r   r  r6   r1   repeatconcat)rr   r	  rI   r
  rJ   Z
batch_sizeZ
seq_lengthr<   position_embedsrb   Zmel_specrN   Zmel_attn_blockZresidual_mel_specr'   r'   r(   r{     s`   



	
 zClvpConditioningEncoder.forward)r   NNN)rW   rX   rY   rZ   r   rm   r   r   r$   r[   r   ri   r{   r   r'   r'   ru   r(   r     s     r   c                   @   s$   e Zd ZeZdZdZdZdd ZdS )ClvpPreTrainedModelZclvpTpast_key_valuesc                 C   s  | j j}t|tjr|jjjd|d d nt|tjt	tj
fr7|jjjd|d d |jdur6|jj  nt|trz| j j}|j jd d|j j d  | }d|j j d | }tjjt|jdrh|jjjn|jj|d tjj|jj|d nYt|tr| j  }|j}|jjjjd||jd  d n<t|tr|jjjjd|d |jjj  n%t|tr| D ]\}}|d	kr|jjd| j jtd| j j  d qt|tjr|jj  |jj d
 dS dS )zInitialize the weightsg        g{Gz?)rz   stdNr   r.   r   )r  zc_proj.weightr   )!r   Zinitializer_factor
isinstancer   r   rp   dataZnormal_r   r   r   r   Zzero_r   rs   num_hidden_layersinitr   r   r   r   ClvpEncoderZget_text_config
projectionr   r   ClvpForCausalLMZnamed_parametersZinitializer_rangemathsqrtr   Zfill_)rr   modulefactorZin_proj_stdZfc_stdr   namer   r'   r'   r(   _init_weights-  s@   

 *

 

z!ClvpPreTrainedModel._init_weightsN)	rW   rX   rY   r   config_classZbase_model_prefixZsupports_gradient_checkpointingZ_skip_keys_device_placementr#  r'   r'   r'   r(   r  &  s    r  c                       s   e Zd ZdZdef fddZdd Zdd Z														dd
ee	j
 dee	j
 dee	j
 dee	j
 dee dee dee deeef fddZ  ZS )r  z
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`ClvpEncoderLayer`].

    Args:
        config: ClvpConfig
    r   c                    s   t     | _t j j| _ jrt	 nd | _
t fddt jD | _t | _tj j jd| _tj j jdd| _d| _|   d S )Nc                    s   g | ]}t  qS r'   )r   r   r   r'   r(   r   ]  s    z(ClvpEncoder.__init__.<locals>.<listcomp>r   Fr   )rl   rm   r   r   r   r   rs   token_embeddingZuse_rotary_embeddingr   r   r   r   r  layersr   sequence_summaryr   r   final_layer_normr   r   r  r  	post_initr   ru   r%  r(   rm   W  s    
zClvpEncoder.__init__c                 C      | j S r   r&  r}   r'   r'   r(   get_input_embeddingsh     z ClvpEncoder.get_input_embeddingsc                 C   
   || _ d S r   r,  rr   r@   r'   r'   r(   set_input_embeddingsk     
z ClvpEncoder.set_input_embeddingsNrI   r
  rJ   r<   r   output_hidden_statesreturn_dictr    c                 C   s  |dur|n| j j}|dur|n| j j}|dur|n| j j}|dur*|dur*td|durF| || | }|d|d }| |}n|durS| dd }ntd|durat	||j
}|du r|durl|jn|j}	tj|d tj|	d}|d}|rdnd}
|rdnd}| jdur| |nd}|}t| jD ]4\}}|r|
|f }
| jr| jrtjj|j||||}n	||||||d	}|d }|r||d f }q|r|
|f }
|}| |}| |}| |}|std
d ||||
|fD S t||||
|dS )a  
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
                Indices of input sequence tokens in the vocabulary.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                input embeddings for the model. This bypasses the model's internal embedding lookup matrix.
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            position_ids (`torch.LongTensor`, *optional*):
                Denotes the position ids of `input_ids`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        Nr  r-   r  r   rA   r   r'   )r   c                 s       | ]	}|d ur|V  qd S r   r'   r   r9   r'   r'   r(   	<genexpr>  s    z&ClvpEncoder.forward.<locals>.<genexpr>)rR   rS   rT   rU   rV   )r   r   r3  use_return_dictr   %warn_if_padding_and_no_attention_maskr   r   r&  r   rB   r"   r$   r%   r   r6   r   rD   r'  r  r   r  r  __call__r)  r(  r  r|   rQ   )rr   rI   r
  rJ   r<   r   r3  r4  input_shaper"   Zencoder_statesZall_attentionsr   rU   idxZencoder_layerZlayer_outputsrS   Zpooled_outputrR   r'   r'   r(   r{   n  s~   '





zClvpEncoder.forward)NNNNNNN)rW   rX   rY   rZ   r   rm   r-  r1  r   r$   ri   r   r   r   r   r{   r   r'   r'   ru   r(   r  N  s:    
	r  c                       s   e Zd ZdZ fddZdd Zdd Zdd	 Ze	
	
	
	
	
	
	
	
	
	
	
dde	e
j de	e
j de	e
j de	e
j de	e
j de	eee
j   de	e
j de	e de	e de	e de	e deeef fddZ  ZS )ClvpDecoderzs
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ClvpDecoderLayer`]
    c                    s   t  | | _t jj jj _t jj jj _	t
 jj _t fddt jjD  _tj jj jjd _d _   d S )Nc                    r   r'   )r   r   r   r}   r'   r(   r     r   z(ClvpDecoder.__init__.<locals>.<listcomp>r   F)rl   rm   r   r   r   r   rs   input_embeds_layerr   position_embeds_layerr   Z
embd_pdropdropr   r   r  r'  r   r   
layer_normr  r*  r   ru   r}   r(   rm     s   "zClvpDecoder.__init__c                 C   r+  r   r>  r}   r'   r'   r(   r-     r.  z ClvpDecoder.get_input_embeddingsc                 C   r/  r   rB  rr   Znew_embeddingsr'   r'   r(   r1    r2  z ClvpDecoder.set_input_embeddingsc                 C   s(   |  D ]\}}| j| j| qdS )zv
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        N)itemsr'  r   Zprune_heads)rr   Zheads_to_prunelayerZheadsr'   r'   r(   _prune_heads  s   zClvpDecoder._prune_headsNrI   rJ   token_type_idsr<   r   r  r
  r   r   r3  r4  r    c              
   C   s  |	d ur|	n| j j}	|
d ur|
n| j j}
|d ur|n| j j}|d ur$|n| j j}|d ur4|d ur4td|d urP| || | }|d|d }|j	d  n|d urb| d d }|j	d  ntd|d urm|j
n|j
}|d ur||d|d }|d u rd}td gt| j }n	|d d d}|d u rtj||d | tj|d}|dd|d }|d u r| |}| |}|| }t||||}| || j j}|}|d ur| |}|| }| |}d|dd   |df }| jr| jr|rtd	 d
}|rdnd }|	rdnd }|	r | j jr dnd }|
r'dnd }tt| j|D ]d\}\}}|
r?||f }| jrW| jrWtjj  |j!|d |||| }n||||||| ||	d}|d }|du rt||d f }|	r|||r~dnd f }| j jr|||rdnd f }q1| "|}||}|
r||f }|stdd |||||fD S t#|||||dS )Nr  r-   r   r  r   rA   r   r   zZ`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...Fr'   r   Tr.   r
   c                 s   r5  r   r'   r6  r'   r'   r(   r7    s    z&ClvpDecoder.forward.<locals>.<genexpr>rS   r  rU   rV   cross_attentions)$r   r   r3  r   r8  r   r9  r   r   r1   r"   r|   r&   r'  r$   r%   r   r6   r>  r?  r   Zget_head_maskr  r@  r  r   loggerZwarning_onceZadd_cross_attentionrD   zipr  r  r:  rA  r   )rr   rI   rJ   rG  r<   r   r  r
  r   r   r3  r4  r;  r"   Zpast_key_values_lengthr  rU   Ztoken_type_embedsZoutput_shapeZpresentsZall_self_attentionsZall_cross_attentionsZall_hidden_statesrN   blockr   r   r'   r'   r(   r{     s   




	





zClvpDecoder.forwardNNNNNNNNNNN)rW   rX   rY   rZ   rm   r-  r1  rF  r   r   r$   ri   r[   r   r   r   r   r   r{   r   r'   r'   ru   r(   r=    sV    	

r=  c                       s   e Zd Zdef fddZdd Zdd Zdd	 Ze	
	
	
	
	
	
	
	
	
	
	
dde	e
j de	e
j de	e
j de	e
j de	e
j de	eee
j   de	e
j de	e de	e de	e de	e deeef fddZ  ZS )	ClvpModelr   c                    s*   t  | || _t| j| _|   d S r   )rl   rm   r   r=  decoderr*  r   ru   r'   r(   rm     s   zClvpModel.__init__c                 C   s   | j jS r   rO  r>  r}   r'   r'   r(   r-    s   zClvpModel.get_input_embeddingsc                 C   s   || j _d S r   rP  r0  r'   r'   r(   r1    s   zClvpModel.set_input_embeddingsc                 C   r+  r   )rO  r}   r'   r'   r(   get_decoder  r.  zClvpModel.get_decoderNrI   rJ   rG  r<   r   r  r
  r   r   r3  r4  r    c                 C   s   |	d ur|	n| j j}	|
d ur|
n| j j}
|d ur|n| j j}|d ur$|n| j j}| j|||||||||	|
|d}|s<|S t|j|j|j	|j
|jdS )N)rI   rJ   rG  r<   r   r  r
  r   r   r3  r4  rH  )r   r   r3  r   r8  rO  r   rS   r  rU   rV   rI  )rr   rI   rJ   rG  r<   r   r  r
  r   r   r3  r4  decoder_outputsr'   r'   r(   r{     s6   zClvpModel.forwardrM  )rW   rX   rY   r   rm   r-  r1  rQ  r   r   r$   ri   r[   r   r   r   r   r   r{   r   r'   r'   ru   r(   rN    sT    	

rN  zG
    The CLVP decoder model with a language modelling head on top.
    )Zcustom_introc                       sh  e Zd Z fddZdd Zdd Z			d!deej d	ee	 d
ee
eejf  deejee e
eejf f fddZ	d!ddZe												d"deej deeeej   deej deej deej deej deej deej dee dee dee dee deeef fddZedeeej  dejdeeej  fdd Z  ZS )#r  c                    sT   t  | || _t| j| _t| jj| _tj	| jj| jj
dd| _|   d S )NTr   )rl   rm   r   rN  modelr   r   rs   
final_normr   r   lm_headr*  r   ru   r'   r(   rm     s   zClvpForCausalLM.__init__c                 C   s
   | j jjS r   rS  rO  r>  r}   r'   r'   r(   r-    r2  z$ClvpForCausalLM.get_input_embeddingsc                 C   s   || j j_d S r   rV  rC  r'   r'   r(   r1    s   z$ClvpForCausalLM.set_input_embeddingsNinputsrK   model_kwargsr    c           	   
   C   s  | j }dd | D }||d}|dur+|dur+td| d| d| d| d	|dur1|}|d	krJd
|v rJ| j|||d|d	< |d
 d
}}|dd}|dur| jjt	j
|jd df| jj|jd}|| jjt	j
|jd dfd|jd7 }t	j||gdd}t|dr|d  dd }nt	jd|jd t	j|jd}|d|jd d}|| jj| |d
< t	j|d
 jd dft	j| jd| jj |d	< |d
 d
|fS | |||}|||fS )zT
        This function extracts the model-specific `inputs` for generation.
        c                 S   s   i | ]\}}|d ur||qS r   r'   )r   r8   r9   r'   r'   r(   
<dictcomp>  s    z9ClvpForCausalLM._prepare_model_inputs.<locals>.<dictcomp>Nz
`inputs`: z` were passed alongside z/ which is not allowed.Make sure to either pass z or z=...rI   r
  )rX  conditioning_embedsr   r   )Z
fill_valuer"   r/   rJ   r-   rA   )Zmain_input_namerD  popr   Z*_maybe_initialize_input_ids_for_generationgetrS  rO  r>  r$   fullr1   r   rK   r"   r?  r  r   r   r  r%   r6   r  ro   )	rr   rW  rK   rX  Z
input_nameZinputs_kwargrZ  Zmel_start_token_embeddingr<   r'   r'   r(   _prepare_model_inputs  s^   	




 
z%ClvpForCausalLM._prepare_model_inputsc                 K   sV  |j d }|dd }|rC|d d j d }|j d |kr |}	n|j d d }	|d d |	d f }|d urC|d d |j d  d f }|dd }
|dd }|
d urv|d u rv|
 dd }||
dkd |ru|d d df d}nd }|d ur|d urtj|gtj|jd}|d ur|d u rd	|i}nd
|i}|	||d||d |S )Nr-   rG  r   r.   r   rJ   r<   rA   r
  rI   r   )r  r   r<   rG  )
r1   r\  r   r  Zmasked_fill_r6   r$   rH   r"   update)rr   rI   r  r
  rZ  kwargsZinput_ids_lengthrG  Zpast_lengthZremove_prefix_lengthrJ   r<   Zmodel_inputsr'   r'   r(   prepare_inputs_for_generation<  s@   

z-ClvpForCausalLM.prepare_inputs_for_generationrI   r  rJ   rG  r<   r   r
  labelsr   r   r3  r4  c                 C   s>  |
dur|
n| j j}
|dur|n| j j}|	dur|	n| j j}	|dur$|n| j j}| j||||||||	|
||d}|d }| |}| |}d}|dur{||j	}|dddddf 
 }|dddf 
 }t }||d|d|d}|s|f|dd  }|dur|f| S |S t|||j|j|j|jdS )a  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        N)rI   r  rJ   rG  r<   r   r
  r   r   r3  r4  r   .r-   r   )r^   r   r  rU   rV   rI  )r   r   r3  r   r8  rS  rT  rU  rx   r"   r   r	   r   r   r   r  rU   rV   rI  )rr   rI   r  rJ   rG  r<   r   r
  rb  r   r   r3  r4  r   rU   Z	lm_logitsr^   Zshift_logitsZshift_labelsZloss_fctr   r'   r'   r(   r{   q  sN   

zClvpForCausalLM.forwardbeam_idxc                    s   t  fdd| D S )a  
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        c                 3   s&    | ]}t  fd d|D V  qdS )c                 3   s$    | ]}| d  |jV  qdS )r   N)Zindex_selectrx   r"   )r   Z
past_staterc  r'   r(   r7    s   " z;ClvpForCausalLM._reorder_cache.<locals>.<genexpr>.<genexpr>Nr|   )r   Z
layer_pastrd  r'   r(   r7    s
    
z1ClvpForCausalLM._reorder_cache.<locals>.<genexpr>re  )r  rc  r'   rd  r(   _reorder_cache  s   	zClvpForCausalLM._reorder_cacher  )NNNNNNNNNNNN)rW   rX   rY   rm   r-  r1  r   r$   r   r   r   strr   r^  ra  r   ri   r[   r   r   r   r{   staticmethodrf  r   r'   r'   ru   r(   r    s    
D
5	

Gr  z`
    The composite CLVP model with a text encoder, speech encoder and speech decoder model.
    c                       s  e Zd ZeZdef fddZdejdejfddZ			dd	e	ej d
e	ej
 de	ej dej
fddZ						dde	ej d	e	ej de	ej
 de	ej
 de	ej de	e dej
fddZe									dd	e	ej de	ej
 de	ej
 d
e	ej
 de	ej de	e de	e de	e de	e deeef fddZe 						dd	e	ej de	ej
 de	ej de	e de	e de	e fddZ  ZS ) !ClvpModelForConditionalGenerationr   c                    s   t  | t|jtstdt|j dt|jts(tdt|j dt|jt	s9tdt|j dt
|| _t|j| _t|j| _t|j| _tt| jj| _|   d S )NzPconfig.text_config is expected to be of type `ClvpEncoderConfig` but is of type .zRconfig.speech_config is expected to be of type `ClvpEncoderConfig` but is of type zSconfig.decoder_config is expected to be of type `ClvpDecoderConfig` but is of type )rl   rm   r  r   r   	TypeErrortypeZspeech_configr   r   r   conditioning_encoderr  speech_decoder_modelr  text_encoder_modelspeech_encoder_modelr   rn   r$   rH   r   Zlogit_scale_init_valuelogit_scaler*  r   ru   r'   r(   rm     s2   
z*ClvpModelForConditionalGeneration.__init__r_   r    c                 C   s   | j jj}|ddddf }t|| jj jkdd}tj|| |d d}t	|D ]7\}}|
 dkr6q+| }|d |||df< |d |jd k rbtj|dd g|jtjd||ddf< q+|S )a#  
        This method modifies the output of the decoder model, such as replacing the `eos_token_id` and changing the
        last few tokens of each sequence.

        Args:
            speech_ids (`torch.LongTensor`):
                This refers to the output of the decoder model.
        Nr   r   )maskr@   r
   )r"   rB   )r   r   decoder_fixing_codesr$   rF   rn  rL   Zmasked_fillr   rD   rE   argmaxr1   rH   r"   r   )rr   r_   rt  Zstop_token_indicesrN   Zeach_seq_stop_token_indexZstmr'   r'   r(   fix_speech_decoder_output  s   
	z;ClvpModelForConditionalGeneration.fix_speech_decoder_outputNrI   text_encoder_inputs_embedsrJ   c                 C   s   | j |||d}|d S )a  
        This method can be used to extract text_embeds from a text. The text embeddings obtained by applying the
        projection layer to the pooled output of the CLVP text encoder model.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                [What are input IDs?](../glossary#input-ids)
            text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
                inputs_embeds for the text encoder model passed in place of `input_ids`.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)

        Returns:
            `torch.FloatTensor` of shape `(batch_size, output_dim)`:
                The text embeddings obtained by applying the projection layer to the pooled output of the CLVP Text
                Model.

        Examples:

        ```python
        >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration

        >>> # Define the Text
        >>> text = "This is an example text."

        >>> # Define processor and model
        >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
        >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")

        >>> # Generate processor output and text embeds
        >>> processor_output = processor(text=text, return_tensors="pt")
        >>> text_embeds = model.get_text_features(input_ids=processor_output["input_ids"])
        ```
        )rI   r
  rJ   r   )ro  )rr   rI   rw  rJ   r   r'   r'   r(   get_text_features  s   1z3ClvpModelForConditionalGeneration.get_text_featuresr	  "conditioning_encoder_inputs_embedsgeneration_configc           
      K   s   |du r;|du r|du s|du rt d|du r| j}|jdi | | j||||d}| jj||d}| |d }| j||d}	|	d S )a*  
        This method can be used to extract speech_embeds. The speech embeddings are obtained by applying the speech
        model on speech_ids. If speech_ids is not present but both input_ids and input_features are given then the
        decoder model will be used to first generate the speech_ids and then applying the speech model.

        Args:
            speech_ids (`torch.LongTensor` of shape `(batch_size, num_speech_ids)`, *optional*):
                Speech Tokens. Padding will be ignored by default should you provide it. If speech_ids are provided
                then input_ids and input_features will be automatically ignored.
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Input text Tokens. Processed from the [`ClvpTokenizer`]. If speech_ids is not provided, then input_ids
                and input_features will be used.
            input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*):
                Indicates log-melspectrogram representations for audio returned by [`ClvpFeatureExtractor`]. If
                speech_ids is not provided, then input_ids and input_features will be used.
            conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
                inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding speech token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            generation_config (`GenerationConfig`, *optional*):
                generation config to control the generation of speech_ids if they are not provided.

        Returns:
            `torch.FloatTensor` of shape `(batch_size, output_dim)`:
                The speech embeddings obtained by applying the projection layer to the pooled output of the CLVP Speech
                Model.

        Examples:

        ```python
        >>> import datasets
        >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration

        >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
        >>> text = "This is an example text."
        >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
        >>> _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()

        >>> # Define processor and model
        >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
        >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")

        >>> # Generate processor output and model output
        >>> processor_output = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt")
        >>> speech_embeds = model.get_speech_features(
        ...     input_ids=processor_output["input_ids"], input_features=processor_output["input_features"]
        ... )
        ```
        NzfEither speech_ids or input_ids/conditioning_encoder_inputs_embeds and input_features must be provided.r	  rI   r
  rJ   )rZ  rz  r   )rI   rJ   r'   )r   rz  r_  rm  rn  generaterv  rp  )
rr   r_   rI   r	  ry  rJ   rz  r`  rZ  r   r'   r'   r(   get_speech_featuresJ  s0   Bz5ClvpModelForConditionalGeneration.get_speech_featuresFreturn_lossr3  r   r4  c
                 C   s|  |dur|n| j j}|	dur|	n| j j}	| j||||d}
| j|
||	d}|d }|jdkr3|d}| |}| j|||	d}| j	|||||	d}|d }|d }||j
dd	d
d }||j
dd	d
d }| j }t|| | }| }d}|rt|}|	s|||||d |d f}|r||d	 |d	 |d	 f7 }|dur|f| S |S t||||||d |d |j|j|jd
S )a  
        input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`):
            Indicates log mel-spectrogram representations for audio returned by [`ClvpFeatureExtractor`].
        conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
            inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
        text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
            inputs_embeds for the text encoder model passed in place of `input_ids`.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.

        Examples:

        ```python
        >>> import datasets
        >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration

        >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
        >>> text = "This is an example text."

        >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
        >>> _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()

        >>> # Define processor and model
        >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
        >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")

        >>> # processor outputs and model outputs
        >>> processor_output = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt")
        >>> outputs = model(
        ...     input_ids=processor_output["input_ids"],
        ...     input_features=processor_output["input_features"],
        ...     return_dict=True,
        ... )
        ```
        Nr{  )r
  r3  r4  r   r
   r.   rI   r3  r4  )rI   r
  rJ   r3  r4  r-   Tr   r0   rw   )
r^   r`   ra   rb   rc   rd   re   rf   rg   rh   )r   r3  r8  rm  rn  ndimru  rv  rp  ro  normrq  expr$   r   r+   r,   r]   rU   )rr   rI   r	  ry  rw  rJ   r~  r3  r   r4  rZ  rR  r_   speech_outputstext_outputsrc   rb   rq  ra   r`   r^   r   r'   r'   r(   r{     s   4



z)ClvpModelForConditionalGeneration.forwardpad_to_max_mel_tokensc                 K   s  |j d }|| jjjd krtd| d| jjjd  |du r$| j}t|}|jdi |}	|	  | 
|	  t||d| jjj| jjjd\}}| j|||d}
| jj|
|||jd	}t|trh|j}|dur||j d  }tjjj|d
|f| jjd}| |}| j|||jd}| j||||jd}|d
 }|d
 }||jdddd }||jdddd }| j }t ||! | }|! }|js||||||d |d f}|r||d |d |d f7 }|S t"||||||d |d |j#|j#|j#d
S )a=
  
        Generate method for `ClvpModelForConditionalGeneration`, this method calls the `generate` method of
        `ClvpForCausalLM` and then uses those generated `speech_ids` to process `text_embeds` and `speech_embeds` using
        `ClvpEncoder`.

        Args:
            input_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Input text Tokens. Processed from the [`ClvpTokenizer`].
            input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*):
                Indicates log-melspectrogram representations for audio returned by [`ClvpFeatureExtractor`].
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which had the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
            pad_to_max_mel_tokens (`int`, *optional*):
                Pads generated speech_ids to the specified value. This is to implement the same logic from the official
                repo, link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L430
                and to make sure the logits are same.
                This does not affect generation quality so please don't consider using it since it is less efficient.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of decoder model, text encoder and speech encoder models.

        Returns:
            `ClvpOutput` or tuple: A `ClvpOutput` (if `return_dict_in_generate=True` or when
            `config.return_dict_in_generate=True`) or a tuple.
        r-   r
   z;Maximum sequence length reached! Found input_ids of length z:.Please make sure that the maximum length of input_ids is NF)rM   rK   rL   )r	  rI   rJ   )rZ  rz  r3  r4  r   r?   r  )rI   rJ   r3  r4  r.   Tr  )
r_   r`   ra   rb   rc   rd   re   rf   rg   rh   r'   )$r1   r   r   r   r   rz  copydeepcopyr_  validateZ_validate_model_kwargsrP   r   rK   rL   rm  rn  r|  Zreturn_dict_in_generater  r   	sequencesr$   r   r#   rC   rv  rp  ro  r  rq  r  r   r+   r]   rU   )rr   rI   r	  rJ   rz  r  r3  r`  r   rX  rZ  rR  r_   Zpadding_neededr  r  rc   rb   rq  ra   r`   r   r'   r'   r(   r|  5  s   
3




	z*ClvpModelForConditionalGeneration.generater  )NNNNNN)	NNNNNNNFN)rW   rX   rY   r   r$  rm   r$   ri   rv  r   r[   rx  r   r   r}  r   r   r   r   r]   r{   Zno_gradr   r|  r   r'   r'   ru   r(   ri    s    # 
;	
a	

 
ri  )ri  r  rN  r  r  r=  )r   )Nr   r>   r   TT)HrZ   r  r  dataclassesr   typingr   r   r   r   r   r$   Ztorch.utils.checkpointr   Ztorch.nnr	   Zactivationsr   r   Z
generationr   r   Zmodeling_attn_mask_utilsr   r   Zmodeling_outputsr   r   r   r   Zmodeling_utilsr   Zpytorch_utilsr   r   r  r   r   r   Zconfiguration_clvpr   r   r   Z
get_loggerrW   rJ  r   r)   r,   r5   r=   rP   rQ   r]   Modulerj   r   r   r   r   r   r   r   r   r   r  r  r=  rN  r  ri  __all__r'   r'   r'   r(   <module>   s   


+- ?d4 '  4C c    
