
from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache, HybridCache, StaticCache
from ...generation import GenerationMixin
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    LossKwargs,
    auto_docstring,
    can_return_tuple,
    is_torch_flex_attn_available,
    logging,
)
from ...utils.deprecation import deprecate_kwarg
from .configuration_cohere2 import Cohere2Config


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class Cohere2RotaryEmbedding(nn.Module):
    def __init__(self, config: Cohere2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.repeat_interleave(freqs, 2, dim=-1)  # diff from Llama: we interleave() instead of cat()
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class Cohere2LayerNorm(nn.Module):
    def __init__(self, hidden_size=None, eps=1e-5, bias=False):
        """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        return hidden_states.to(input_dtype)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    # Split and rotate interleaved channel pairs. Note that this differs from e.g. Llama.
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return rot_x


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)


class Cohere2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Cohere2Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.sliding_window = (
            config.sliding_window if (self.layer_idx + 1) % self.config.sliding_window_pattern != 0 else None
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings

        if self.sliding_window is not None:
            # Only the sliding-window layers use rotary embeddings; global layers carry no positional encoding
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "sliding_window": self.sliding_window,
                "cache_position": cache_position,
            }
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

            # Here we need to slice as we use a static cache by default, but FA2 does not support it
            if attention_mask is not None and self.config._attn_implementation == "flash_attention_2":
                seq_len = attention_mask.shape[-1]
                key_states, value_states = key_states[:, :, :seq_len, :], value_states[:, :, :seq_len, :]

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
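

# Illustrative sketch, not part of the original module: Cohere2 rotates interleaved
# channel pairs (0, 1), (2, 3), ... rather than splitting the head dimension in two
# halves as Llama does. The hypothetical check below exercises `apply_rotary_pos_emb`
# at position 0, where cos=1 and sin=0, so the rotation must reduce to a pure
# `attention_scaling` rescale. It is never called by the model code.
def _interleaved_rope_identity_sketch(config: Cohere2Config):
    head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
    rotary = Cohere2RotaryEmbedding(config=config)
    q = torch.randn(1, config.num_attention_heads, 3, head_dim)
    cos, sin = rotary(q, position_ids=torch.zeros(1, 3, dtype=torch.long))
    q_rot, _ = apply_rotary_pos_emb(q, q, cos, sin)
    torch.testing.assert_close(q_rot, q * rotary.attention_scaling)

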
class Cohere2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class Cohere2DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Cohere2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Cohere2Attention(config, layer_idx)
        self.mlp = Cohere2MLP(config)
        self.input_layernorm = Cohere2LayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)
        self.config = config
        self.is_sliding = (layer_idx + 1) % self.config.sliding_window_pattern != 0
        self.sliding_window = config.sliding_window

    @deprecate_kwarg("last_cache_position", version="4.53.0")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
        """
        if self.is_sliding and attention_mask is not None:  # efficient SDPA and no padding
            # In prefill, we may be larger than sliding window
            effective_seq_len = max(cache_position.shape[0], self.sliding_window)
            # For FA2, the mask is 2D and is of shape [bs, processed_tokens] (not [bs, max_cache_len]),
            # thus we must slice from the right (at most `effective_seq_len` elements)
            if self.config._attn_implementation == "flash_attention_2":
                attention_mask = attention_mask[:, -effective_seq_len:]
            # Otherwise, the mask is 4D of shape [bs, 1, query_len, max_cache_len] thus we must slice
            # from the left, with an offset if we are beyond the sliding window
            else:
                min_dtype = torch.finfo(attention_mask.dtype).min
                sliding_window_mask = torch.tril(
                    torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-self.sliding_window
                )
                attention_mask = torch.where(sliding_window_mask, min_dtype, attention_mask)
                # In case we are beyond the sliding window, we need to correctly offset the mask slicing
                offset = cache_position[-1] - effective_seq_len + 1
                # Should only be used when beyond the sliding window (i.e. offset > 0)
                offset = torch.clamp(offset, min=0)
                # Equivalent to `attention_mask[:, :, :, offset : offset + effective_seq_len]`,
                # but without data-dependent slicing (i.e. torch.compile friendly)
                mask_indexes = torch.arange(
                    min(effective_seq_len, attention_mask.shape[-1]), device=attention_mask.device
                )
                mask_indexes += offset
                attention_mask = attention_mask[:, :, :, mask_indexes]

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states_attention, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        # Fully Connected: the attention and MLP branches share the same layernormed input
        hidden_states_mlp = self.mlp(hidden_states)

        # Add everything together
        hidden_states = residual + hidden_states_attention + hidden_states_mlp

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class Cohere2PreTrainedModel(PreTrainedModel):
    config_class = Cohere2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Cohere2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Cohere2LayerNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class Cohere2Model(Cohere2PreTrainedModel):
    def __init__(self, config: Cohere2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Cohere2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Cohere2LayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)
        self.rotary_emb = Cohere2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[HybridCache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None and not self.training:
            batch_size, seq_len, _ = inputs_embeds.shape
            past_key_values = HybridCache(
                self.config,
                max_batch_size=batch_size,
                max_cache_len=seq_len,
                dtype=inputs_embeds.dtype,
                device=self.device,
            )

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    @torch.no_grad()
    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: HybridCache,
        output_attentions: bool = False,
    ):
        # Flash Attention 2 and flex attention build their own masks; everything else gets a dense 4D mask
        if self.config._attn_implementation == "flash_attention_2":
            return attention_mask

        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if isinstance(past_key_values, (HybridCache, StaticCache)):
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = attention_mask.shape[-1] if attention_mask is not None else input_tensor.shape[1]

        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )
        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class Cohere2ForCausalLM(Cohere2PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config: Cohere2Config):
        super().__init__(config)
        self.model = Cohere2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.logit_scale = config.logit_scale
        self.tie_word_embeddings = config.tie_word_embeddings

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Cohere2ForCausalLM

        >>> model = Cohere2ForCausalLM.from_pretrained("Cohere2ForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("Cohere2ForAI/c4ai-command-r-v01")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits * self.logit_scale

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten: has a special cache type, `HybridCache`

        # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
        if past_key_values is not None:
            if inputs_embeds is not None or cache_position[-1] >= input_ids.shape[1]:
                input_ids = input_ids[:, -cache_position.shape[0] :]
            elif input_ids.shape[1] != cache_position.shape[0]:
                input_ids = input_ids[:, cache_position]

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]
                # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s
                # `mode="reduce-overhead"`, as otherwise the input `position_ids` would have varying stride
                # during decoding, which re-triggers a capture
                position_ids = position_ids.clone(memory_format=torch.contiguous_format)

        # If `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and cache_position[0] == 0:
            model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
        else:
            # The clone here is for the same reason as for `position_ids`
            model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}

        if (
            isinstance(past_key_values, HybridCache)
            and attention_mask.ndim == 2
            and not self.config._attn_implementation == "flash_attention_2"
        ):
            if model_inputs["inputs_embeds"] is not None:
                batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
                device = model_inputs["inputs_embeds"].device
            else:
                batch_size, sequence_length = model_inputs["input_ids"].shape
                device = model_inputs["input_ids"].device

            attention_mask = self.model._prepare_4d_causal_attention_mask_with_cache_position(
                attention_mask,
                sequence_length=sequence_length,
                target_length=past_key_values.get_max_cache_shape(),
                dtype=self.lm_head.weight.dtype,
                device=device,
                cache_position=cache_position,
                batch_size=batch_size,
            )

        if logits_to_keep is not None:
            model_inputs["logits_to_keep"] = logits_to_keep

        model_inputs.update(
            {
                "position_ids": position_ids,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "use_cache": use_cache,
                "attention_mask": attention_mask,
            }
        )
        return model_inputs


__all__ = ["Cohere2ForCausalLM", "Cohere2Model", "Cohere2PreTrainedModel"]
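

# Illustrative sketch, not part of the original module: Cohere2 alternates sliding-window
# and global layers. Every `sliding_window_pattern`-th layer attends globally (and skips
# RoPE); all other layers are restricted to the last `sliding_window` positions. The
# hypothetical helper below spells that schedule out for a given config and mirrors the
# `is_sliding` test in `Cohere2DecoderLayer.__init__`. It is never called by the model code.
def _attention_layer_schedule_sketch(config: Cohere2Config) -> List[str]:
    return [
        "sliding" if (layer_idx + 1) % config.sliding_window_pattern != 0 else "global"
        for layer_idx in range(config.num_hidden_layers)
    ]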