
"""PyTorch Cohere model."""

from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...cache_utils import Cache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...pytorch_utils import ALL_LAYERNORM_LAYERS
from ...utils import LossKwargs, logging
from ..llama.modeling_llama import (
    LlamaAttention,
    LlamaForCausalLM,
    LlamaMLP,
    LlamaModel,
    LlamaPreTrainedModel,
    LlamaRotaryEmbedding,
    eager_attention_forward,
)
from .configuration_cohere import CohereConfig


logger = logging.get_logger(__name__)


class CohereLayerNorm(nn.Module):
    def __init__(self, hidden_size=None, eps=1e-5, bias=False):
        """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        return hidden_states.to(input_dtype)


ALL_LAYERNORM_LAYERS.append(CohereLayerNorm)


class CohereRotaryEmbedding(LlamaRotaryEmbedding):
    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.repeat_interleave(freqs, 2, dim=-1)  # diff from Llama: we interleave() instead of cat()
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
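
# A note on the layout produced above (descriptive comment, not extra model code):
# with `repeat_interleave`, cos/sin come out as [cos t0, cos t0, cos t1, cos t1, ...]
# along the last dimension, so each angle is shared by the adjacent channel pair
# (2i, 2i+1). Llama instead `cat`s the two halves, pairing channel i with channel
# i + head_dim // 2. The interleaved `rotate_half` below is the counterpart of this
# layout.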
]]_<  <r1   rH   c                 |    U SS S S24   nU SSS S24   n[         R                  " U* U/SS9R                  S5      nU$ )N.r   r   r3   rO   )r&   stackflatten)r`   x1x2rot_xs       r/   rotate_halfrp   \   sL    	
3!8B	
319BKK"b	r*2226ELr1   c                 &   U R                   nU R                  5       n UR                  5       nUR                  U5      nUR                  U5      nX-  [        U 5      U-  -   nX-  [        U5      U-  -   nUR	                  US9UR	                  US94$ )a  Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)
                  SS9U l        [        R                  " U R                  U R
                  SS9U l        [        R                  " U R
                  U R                  SS9U l        g )NF)r-   )	r#   r$   r   Linearr+   intermediate_size	gate_projup_proj	down_projr*   configr.   s     r/   r$   CohereMLP.__init__   ss     4#3#3T5K5KRWXyy!1!143I3IPUV4#9#94;K;KRWXr1   )r   r   r   )r@   rA   rB   rC   r$   rD   rE   rF   s   @r/   rz   rz      s    Y Yr1   rz   c                   P  ^  \ rS rSrSrSS\S\\   4U 4S jjjr  SS\	R                  S\\	R                  \	R                  4   S\\	R                     S	\\   S
\\	R                     S\\   S\\	R                  \\	R                     \\\	R                        4   4S jjrSrU =r$ )CohereAttention   z=Multi-headed attention from 'Attention Is All You Need' paperr   	layer_idxc                 &  > [         TU ]  X5        UR                  U l        U R                  (       a_  [        UR                  U R
                  4UR                  S9U l        [        UR                  U R
                  4UR                  S9U l	        g g )Nr+   r,   )
r#   r$   use_qk_normr    num_attention_headshead_dimlayer_norm_epsq_normnum_key_value_headsk_normr*   r   r   r.   s      r/   r$   CohereAttention.__init__   sz    +!--)#77GVMbMbDK *#77GVMbMbDK r1   r;   position_embeddingsattention_maskpast_key_valuecache_positionkwargsreturnc                 4   UR                   S S n/ UQSPU R                  P7nU R                  U5      R                  U5      n	U R	                  U5      R                  U5      n
U R                  U5      R                  U5      nU R                  (       a"  U R                  U	5      n	U R                  U
5      n
U	R                  SS5      n	U
R                  SS5      n
UR                  SS5      nUu  p[        XX5      u  pUb$  XUS.nUR                  XU R                  U5      u  p[        nU R                  R                  S:w  ad  U R                  R                  S:X  a-  UR!                  SS5      (       a  ["        R%                  S	5        O[&        U R                  R                     nU" U U	U
UU4U R(                  (       d  S
OU R*                  U R,                  S.UD6u  nnUR.                  " / UQSP76 R1                  5       nU R3                  U5      nUU4$ )Nr3   r   r   )r_   r]   r   eagersdpaoutput_attentionsFz`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.        )dropoutscaling)rU   r   q_projviewk_projv_projr   r   r   r[   rx   updater   r   r   _attn_implementationgetloggerwarning_oncer   trainingattention_dropoutr   reshape
contiguouso_proj)r*   r;   r   r   r   r   r   input_shapehidden_shapequery_states
key_statesvalue_statesr]   r_   cache_kwargsattention_interfaceattn_outputattn_weightss                     r/   r>   CohereAttention.forward   s    $))#2.88b8$--8{{=166|D[[/44\B
{{=166|D;;|4LZ0J#--a3))!Q/
#--a3&#7RU#[ %#&nUL'5'<'<ZW[WeWegs't$J(?;;++w6{{//69fjjI\^c>d>d##L
 '>dkk>^>^&_#$7	%
  $}}C$2H2HLL	%
 	%
!\ "));;;;FFHkk+.L((r1   )r   r   r   N)NN)r@   rA   rB   rC   __doc__r   r   intr$   r&   Tensorr   r
   
LongTensorr   r   r>   rD   rE   rF   s   @r/   r   r      s    G
| 
 
 
" +/597)||7) #5<<#=>7) !.	7)
 !7) !!1!127) -.7) 
u||Xell3XeELL>Q5RR	S7) 7)r1   r   c                     ^  \ rS rSrS\S\4U 4S jjr       SS\R                  S\	\R                     S\	\R                     S\	\   S	\	\   S
\	\   S\	\R                     S\	\\R                  \R                  4      S\\   S\\R                   \	\\R                   \R                   4      4   4S jjrSrU =r$ )CohereDecoderLayer   r   r   c                    > [         TU ]  5         UR                  U l        [        XS9U l        [        U5      U l        [        UR                  UR                  S9U l	        g )N)r   r   r   )
r#   r$   r+   r   	self_attnrz   mlpr    r   input_layernormr   s      r/   r$   CohereDecoderLayer.__init__   sP    !--(LV$.F<N<NU[UjUjkr1   r;   r   ra   r   r   	use_cacher   r   r   r   c	                     Un
U R                  U5      nU R                  " SUUUUUUUUS.U	D6u  pU R                  U5      nX-   U-   nU4nU(       a  X4-  nU$ )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states_attention, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )

        # Fully Connected
        hidden_states_mlp = self.mlp(hidden_states)

        # Add everything together
        hidden_states = residual + hidden_states_attention + hidden_states_mlp

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
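
# A compact view of the residual wiring above (descriptive comment, not model code):
# Cohere blocks are "parallel" -- attention and MLP both read the same normed input
# and are summed with a single residual,
#
#   y = x + Attn(LN(x)) + MLP(LN(x))
#
# rather than the sequential y' = x + Attn(LN(x)); y = y' + MLP(LN(y')) used in
# Llama-style blocks, which is why the layer has one `input_layernorm` and no
# post-attention norm.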
u  (51B1BEDUDU1U+V"WW	X: :r1   r   c                       \ rS rSrS rSrg)CoherePreTrainedModeli  c                    U R                   R                  n[        U[        R                  5      (       aW  UR
                  R                  R                  SUS9  UR                  b%  UR                  R                  R                  5         g g [        U[        R                  5      (       ad  UR
                  R                  R                  SUS9  UR                  b2  UR
                  R                  UR                     R                  5         g g [        U[        5      (       a&  UR
                  R                  R                  S5        g g )Nr   )r8   stdg      ?)r   initializer_rangerV   r   r}   r(   datanormal_r-   zero_	Embeddingpadding_idxr    fill_)r*   moduler   s      r/   _init_weights#CoherePreTrainedModel._init_weights  s    kk++fbii((MM&&CS&9{{&  &&( '--MM&&CS&9!!-""6#5#56<<> .00MM$$S) 1r1   rg   N)r@   rA   rB   rC   r   rD   rg   r1   r/   r   r     s    *r1   r   c                   0   ^  \ rS rSrS\4U 4S jjrSrU =r$ )CohereModeli'  r   c           	         > [         TU ]  U5        [        R                  " [	        UR
                  5       Vs/ s H  n[        X5      PM     sn5      U l        [        US9U l	        [        UR                  UR                  S9U l        g s  snf )N)r   r   )r#   r$   r   
ModuleListrangenum_hidden_layersr   layersrH   
rotary_embr    r+   r   normr   s      r/   r$   CohereModel.__init__(  ss     mmDI&JbJbDcdDcy2Dcd
 0v>#1C1C&J_J_`	 es   B)r   r   r   )r@   rA   rB   rC   r   r$   rD   rE   rF   s   @r/   r   r   '  s    a| a ar1   r   c                       \ rS rSrSrg)KwargsForCausalLMi1  rg   N)r@   rA   rB   rC   rD   rg   r1   r/   r   r   1  s    3r1   r   c                   |  ^  \ rS rSrU 4S jr           SS\\R                     S\\R                     S\\R                     S\\	\
\\R                     4      S\\R                     S\\R                     S	\\   S
\\   S\\   S\\R                     S\	\\R                  4   S\\   S\4S jjrSrU =r$ )CohereForCausalLMi4  c                    > [         TU ]  U5        [        U5      U l        UR                  U l        UR
                  U l        g r   )r#   r$   r   modellogit_scaletie_word_embeddingsr   s     r/   r$   CohereForCausalLM.__init__5  s8      (
!--#)#=#= r1   	input_idsr   ra   past_key_valuesinputs_embedslabelsr   r   output_hidden_statesr   logits_to_keepr   r   c                    Ub  UOU R                   R                  nU	b  U	OU R                   R                  n	U R                  " SUUUUUUUU	U
S.	UD6nUR                  n[        U[        5      (       a  [        U* S5      OUnU R                  USS2USS24   5      nUU R                  -  nSnUb)  U R                  " SUX`R                   R                  S.UD6n[        UUUR                  UR                  UR                  S9$ )a  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CohereForCausalLM

        >>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits * self.logit_scale  # main diff from Llama

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]