# Reconstructed source of `transformers/models/olmo/modeling_olmo.py` (OLMo model implementation).

from typing import Callable, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from .configuration_olmo import OlmoConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class OlmoLayerNorm(nn.Module):
    """LayerNorm but with no learnable weight or bias."""

    def __init__(self, hidden_size: int) -> None:
        super().__init__()
        self.normalized_shape = (hidden_size,)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        orig_dtype = hidden_states.dtype
        # Normalize in float32 for numerical stability, then cast back to the input dtype.
        return F.layer_norm(
            hidden_states.to(dtype=torch.float32), self.normalized_shape, None, None, eps=1e-5
        ).to(orig_dtype)


class OlmoMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    q_type, k_type = q.dtype, k.dtype
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(q_type), k_embed.to(k_type)


class OlmoAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: OlmoConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # OLMo clips the QKV activations to [-clip_qkv, clip_qkv] when configured.
        if self.config.clip_qkv is not None:
            query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
            key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
            value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class OlmoDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: OlmoConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = OlmoAttention(config=config, layer_idx=layer_idx)
        self.mlp = OlmoMLP(config)
        self.input_layernorm = OlmoLayerNorm(config.hidden_size)
        self.post_attention_layernorm = OlmoLayerNorm(config.hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


class OlmoRotaryEmbedding(nn.Module):
    def __init__(self, config: OlmoConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos, sin


@auto_docstring
class OlmoPreTrainedModel(PreTrainedModel):
    config_class = OlmoConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["OlmoDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


@auto_docstring
class OlmoModel(OlmoPreTrainedModel):
    def __init__(self, config: OlmoConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [OlmoDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = OlmoLayerNorm(config.hidden_size)
        self.rotary_emb = OlmoRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, rely on its `is_causal` argument instead of an explicit `attn_mask`.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case a provided 2D attention mask is given, generate the full 4D causal mask here.
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ("cuda", "xpu", "npu")
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows (e.g. when using left padding), as required by the
            # memory-efficient path of F.scaled_dot_product_attention.
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # The mask already comes in an inverted 4D form and requires no further processing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class OlmoForCausalLM(OlmoPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = OlmoModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, OlmoForCausalLM

        >>> model = OlmoForCausalLM.from_pretrained("meta-olmo/Olmo-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-olmo/Olmo-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute the necessary logits; `logits_to_keep` keeps the last N positions (0 keeps all).
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["OlmoForCausalLM", "OlmoModel", "OlmoPreTrainedModel"]