o
    ZhD                     @   s  d Z ddlmZmZmZmZmZ ddlZddlZddlm	Z	 ddl
mZ ddlmZ ddlmZ dd	lmZmZ dd
lmZ ddlmZ ddlmZ ddlmZ ddlmZmZ ddlmZm Z m!Z!m"Z"m#Z#m$Z$m%Z% ddl&m'Z' e(e)Z*G dd de	j+Z,e-e, G dd de$Z.dd Z/d*ddZ0G dd de!Z1G dd deZ2G dd  d eZ3G d!d" d"e#Z4G d#d$ d$e"Z5G d%d& d&eeZ6G d'd( d(e Z7g d)Z8dS )+zPyTorch Cohere model.    )CallableListOptionalTupleUnionN)nn   )Cache)FlashAttentionKwargs)GradientCheckpointingLayer)BaseModelOutputWithPastCausalLMOutputWithPast)dynamic_rope_update)ALL_ATTENTION_FUNCTIONS)Unpack)ALL_LAYERNORM_LAYERS)
LossKwargslogging   )LlamaAttentionLlamaForCausalLMLlamaMLP
LlamaModelLlamaPreTrainedModelLlamaRotaryEmbeddingeager_attention_forward   )CohereConfigc                       s&   e Zd Zd fdd	Zdd Z  ZS )	CohereLayerNormNh㈵>Fc                    s&   t    tt|| _|| _dS )zcThe hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dimN)super__init__r   	ParametertorchZonesweightvariance_epsilon)selfhidden_sizeepsbias	__class__ X/var/www/auris/lib/python3.10/site-packages/transformers/models/cohere/modular_cohere.pyr!   8   s   

zCohereLayerNorm.__init__c                 C   sl   |j }|tj}|jddd}|| djddd}|| t|| j  }| jtj| }||S )NT)Zkeepdimr   )	dtypetor#   Zfloat32meanpowZrsqrtr%   r$   )r&   hidden_statesZinput_dtyper1   Zvariancer,   r,   r-   forward>   s   
zCohereLayerNorm.forward)Nr   F)__name__
__module____qualname__r!   r4   __classcell__r,   r,   r*   r-   r   7   s    r   c                   @   s    e Zd Ze edd ZdS )CohereRotaryEmbeddingc           
      C   s   | j d d d d f  |jd dd}|d d d d d f  }t|jjtr2|jjdkr2|jjnd}tj	|dd* | |  
dd}tj|ddd	}| | j }| | j }	W d    n1 shw   Y  |j|jd
|	j|jd
fS )Nr   r.   r   ZmpscpuF)device_typeenabledr   dimr/   )Zinv_freqfloatexpandshape
isinstanceZdevicetypestrr#   Zautocast	transposeZrepeat_interleavecosZattention_scalingsinr0   r/   )
r&   xposition_idsZinv_freq_expandedZposition_ids_expandedr;   ZfreqsZembrG   rH   r,   r,   r-   r4   L   s   (&zCohereRotaryEmbedding.forwardN)r5   r6   r7   r#   Zno_gradr   r4   r,   r,   r,   r-   r9   K   s    r9   c                 C   sB   | dd d df }| ddd df }t j| |gddd}|S )N.r   r   r.   r=   )r#   stackflatten)rI   x1Zx2Zrot_xr,   r,   r-   rotate_half\   s   rO   c           	      C   sj   | j }|  } | }||}||}| | t| |  }|| t||  }|j|d|j|dfS )a  Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    r?   )r/   r@   Z	unsqueezerO   r0   )	qkrG   rH   rJ   Zunsqueeze_dimr/   Zq_embedZk_embedr,   r,   r-   apply_rotary_pos_embd   s   

rR   c                       s   e Zd Z fddZ  ZS )	CohereMLPc                    sR   t  | tj| j| jdd| _tj| j| jdd| _tj| j| jdd| _d S )NF)r)   )	r    r!   r   Linearr'   Zintermediate_sizeZ	gate_projZup_projZ	down_projr&   configr*   r,   r-   r!      s   zCohereMLP.__init__)r5   r6   r7   r!   r8   r,   r,   r*   r-   rS      s    rS   c                       s   e Zd ZdZddedee f fddZ		ddej	de
ej	ej	f d	eej	 d
ee deej dee de
ej	eej	 ee
ej	  f fddZ  ZS )CohereAttentionz=Multi-headed attention from 'Attention Is All You Need' paperNrV   	layer_idxc                    sT   t  || |j| _| jr(t|j| jf|jd| _t|j| jf|jd| _	d S d S )Nr'   r(   )
r    r!   use_qk_normr   Znum_attention_headshead_dimlayer_norm_epsq_normZnum_key_value_headsk_normr&   rV   rX   r*   r,   r-   r!      s   zCohereAttention.__init__r3   position_embeddingsattention_maskpast_key_valuecache_positionkwargsreturnc                 K   sn  |j d d }g |d| jR }| ||}	| ||}
| ||}| jr6| |	}	| |
}
|		dd}	|
	dd}
|	dd}|\}}t
|	|
||\}	}
|d urj|||d}||
|| j|\}
}t}| jjdkr| jjdkr|ddrtd	 nt| jj }|| |	|
||f| jsd
n| j| jd|\}}|jg |dR   }| |}||fS )Nr.   r   r   )rH   rG   rc   eagerZsdpaoutput_attentionsFz`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.        )Zdropoutscaling)rB   r[   Zq_projviewZk_projZv_projrZ   r]   r^   rF   rR   updaterX   r   rV   Z_attn_implementationgetloggerZwarning_oncer   ZtrainingZattention_dropoutri   Zreshape
contiguousZo_proj)r&   r3   r`   ra   rb   rc   rd   Zinput_shapeZhidden_shapeZquery_statesZ
key_statesZvalue_statesrG   rH   Zcache_kwargsZattention_interfaceZattn_outputZattn_weightsr,   r,   r-   r4      sL   	



zCohereAttention.forwardN)NN)r5   r6   r7   __doc__r   r   intr!   r#   Tensorr   r	   
LongTensorr   r
   r4   r8   r,   r,   r*   r-   rW      s(    rW   c                       s   e Zd Zdedef fddZ							ddejdeej d	eej	 d
ee
 dee dee deej	 deeejejf  dee deejeeejejf  f fddZ  ZS )CohereDecoderLayerrV   rX   c                    s@   t    |j| _t||d| _t|| _t|j|jd| _	d S )N)rV   rX   rY   )
r    r!   r'   rW   	self_attnrS   mlpr   r\   input_layernormr_   r*   r,   r-   r!      s
   

zCohereDecoderLayer.__init__NFr3   ra   rJ   rb   rg   	use_cacherc   r`   rd   re   c	                 K   sb   |}
|  |}| jd||||||||d|	\}}| |}|
| | }|f}|r/||f7 }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        )r3   ra   rJ   rb   rg   rx   rc   r`   Nr,   )rw   ru   rv   )r&   r3   ra   rJ   rb   rg   rx   rc   r`   rd   ZresidualZhidden_states_attentionZself_attn_weightsZhidden_states_mlpoutputsr,   r,   r-   r4      s(   
	


zCohereDecoderLayer.forward)NNNFFNN)r5   r6   r7   r   rq   r!   r#   rr   r   rs   r	   boolr   r   r
   FloatTensorr4   r8   r,   r,   r*   r-   rt      s<    
	
rt   c                   @   s   e Zd Zdd ZdS )CoherePreTrainedModelc                 C   s   | j j}t|tjr"|jjjd|d |jd ur |jj	  d S d S t|tj
rC|jjjd|d |jd urA|jj|j 	  d S d S t|trQ|jjd d S d S )Nrh   )r1   stdg      ?)rV   Zinitializer_rangerC   r   rT   r$   dataZnormal_r)   Zzero_Z	EmbeddingZpadding_idxr   Zfill_)r&   moduler}   r,   r,   r-   _init_weights  s   


z#CoherePreTrainedModel._init_weightsN)r5   r6   r7   r   r,   r,   r,   r-   r|     s    r|   c                       s"   e Zd Zdef fddZ  ZS )CohereModelrV   c                    sN   t    t fddt jD | _t d| _t	 j
 jd| _d S )Nc                    s   g | ]}t  |qS r,   )rt   ).0rX   rV   r,   r-   
<listcomp>+  s    z(CohereModel.__init__.<locals>.<listcomp>r   rY   )r    r!   r   Z
ModuleListrangeZnum_hidden_layersZlayersr9   Z
rotary_embr   r'   r\   ZnormrU   r*   r   r-   r!   (  s   zCohereModel.__init__)r5   r6   r7   r   r!   r8   r,   r,   r*   r-   r   '  s    r   c                   @   s   e Zd ZdS )KwargsForCausalLMN)r5   r6   r7   r,   r,   r,   r-   r   1  s    r   c                       s   e Zd Z fddZ											ddeej deej deej deee	e
ej f  d	eej d
eej dee dee dee deej deeejf dee defddZ  ZS )CohereForCausalLMc                    s*   t  | t|| _|j| _|j| _d S ro   )r    r!   r   modellogit_scaleZtie_word_embeddingsrU   r*   r,   r-   r!   5  s   
zCohereForCausalLM.__init__Nr   	input_idsra   rJ   past_key_valuesinputs_embedslabelsrx   rg   output_hidden_statesrc   logits_to_keeprd   re   c                 K   s   |dur|n| j j}|	dur|	n| j j}	| jd||||||||	|
d	|}|j}t|tr4t| dn|}| |dd|ddf }|| j	 }d}|dur]| j
d||| j jd|}t|||j|j|jdS )az  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >> from transformers import AutoTokenizer, CohereForCausalLM

        >> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >> prompt = "Hey, are you conscious? Can you talk to me?"
        >> inputs = tokenizer(prompt, return_tensors="pt")

        >> # Generate
        >> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```N)	r   ra   rJ   r   r   rx   rg   r   rc   )logitsr   
vocab_size)lossr   r   r3   
attentionsr,   )rV   rg   r   r   Zlast_hidden_staterC   rq   sliceZlm_headr   Zloss_functionr   r   r   r3   r   )r&   r   ra   rJ   r   r   r   rx   rg   r   rc   r   rd   ry   r3   Zslice_indicesr   r   r,   r,   r-   r4   ;  s<   %

zCohereForCausalLM.forward)NNNNNNNNNNr   )r5   r6   r7   r!   r   r#   rs   rr   r   r	   r   r{   rz   rq   r   r   r   r4   r8   r,   r,   r*   r-   r   4  sP    	
r   )r   r   r|   )Nr   )9rp   typingr   r   r   r   r   r#   Ztorch.utils.checkpointr   Zcache_utilsr	   Zmodeling_flash_attention_utilsr
   Zmodeling_layersr   Zmodeling_outputsr   r   Zmodeling_rope_utilsr   Zmodeling_utilsr   Zprocessing_utilsr   Zpytorch_utilsr   utilsr   r   Zllama.modeling_llamar   r   r   r   r   r   r   Zconfiguration_coherer   Z
get_loggerr5   rm   Moduler   appendr9   rO   rR   rS   rW   rt   r|   r   r   r   __all__r,   r,   r,   r-   <module>   s<   $	


IE
R