from typing import Callable, List, Optional, Tuple, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from .configuration_cohere import CohereConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class CohereLayerNorm(nn.Module):
    def __init__(self, hidden_size=None, eps=1e-5, bias=False):
        """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        return hidden_states.to(input_dtype)


class CohereRotaryEmbedding(nn.Module):
    def __init__(self, config: CohereConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.repeat_interleave(freqs, 2, dim=-1)  # unlike Llama, Cohere interleaves instead of concatenating
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class CohereMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    # Interleaved rotation: pairs of adjacent channels are rotated together.
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return rot_x


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)


class CohereAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.use_qk_norm = config.use_qk_norm
        if self.use_qk_norm:
            # The norms are applied per head, over the head_dim
            self.q_norm = CohereLayerNorm(
                hidden_size=(config.num_attention_heads, self.head_dim), eps=config.layer_norm_eps
            )
            self.k_norm = CohereLayerNorm(
                hidden_size=(config.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape)
        key_states = self.k_proj(hidden_states).view(hidden_shape)
        value_states = self.v_proj(hidden_states).view(hidden_shape)

        if self.use_qk_norm:  # main diff from Llama
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class CohereDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: CohereConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = CohereAttention(config=config, layer_idx=layer_idx)
        self.mlp = CohereMLP(config)
        self.input_layernorm = CohereLayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states_attention, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )

        # Fully Connected (parallel to attention, Cohere-style)
        hidden_states_mlp = self.mlp(hidden_states)

        hidden_states = residual + hidden_states_attention + hidden_states_mlp

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class CoherePreTrainedModel(PreTrainedModel):
    config_class = CohereConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["CohereDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, CohereLayerNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class CohereModel(CoherePreTrainedModel):
    def __init__(self, config: CohereConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = CohereLayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)
        self.rotary_emb = CohereRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, rely on its `is_causal` argument instead of an explicit mask so that the
        # Flash Attention kernel can be dispatched. This is not compatible with compileable (static) caches.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention_mask` is 2D, generate the 4D causal mask here.
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows (e.g. the first rows with left padding), as required by
            # the memory-efficient path of F.scaled_dot_product_attention.
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # The mask already comes in an inverted 4D form and requires no further processing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class CohereForCausalLM(CoherePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = CohereModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.logit_scale = config.logit_scale
        self.tie_word_embeddings = config.tie_word_embeddings

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CohereForCausalLM

        >>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute the necessary logits; `logits_to_keep` slices from the end of the sequence.
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits * self.logit_scale  # main diff from Llama

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]