from typing import Callable, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint

from ...cache_utils import Cache
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...utils import logging
from ..llama.modeling_llama import (
    LlamaAttention,
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaMLP,
    LlamaModel,
    LlamaPreTrainedModel,
    LlamaRotaryEmbedding,
    eager_attention_forward,
    rotate_half,
)
from .configuration_olmo import OlmoConfig


logger = logging.get_logger(__name__)


class OlmoLayerNorm(nn.Module):
    """LayerNorm but with no learnable weight or bias."""

    def __init__(self, hidden_size: int) -> None:
        super().__init__()
        self.normalized_shape = (hidden_size,)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        orig_dtype = hidden_states.dtype
        return F.layer_norm(
            hidden_states.to(dtype=torch.float32), self.normalized_shape, None, None, eps=1e-5
        ).to(orig_dtype)


class OlmoMLP(LlamaMLP):
    def __init__(self, config):
        super().__init__(config)
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
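

# NOTE: The helper below is not part of the original module; it is a minimal,
# illustrative sketch of the behaviour documented by `OlmoLayerNorm`:
# normalization runs in float32, the result is cast back to the input dtype,
# and the module registers no learnable parameters. The helper name is hypothetical.
def _example_olmo_layer_norm() -> torch.Tensor:
    layer_norm = OlmoLayerNorm(hidden_size=64)
    hidden_states = torch.randn(2, 8, 64, dtype=torch.bfloat16)
    output = layer_norm(hidden_states)
    assert output.dtype == hidden_states.dtype  # input dtype is preserved
    assert len(list(layer_norm.parameters())) == 0  # no weight or bias
    return output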


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    q_type, k_type = q.dtype, k.dtype
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(q_type), k_embed.to(k_type)

r8   c                   @   sn   e Zd Z		d
dejdeejejf deej dee deej deejeej eeej  f fdd	Z	dS )OlmoAttentionNr    position_embeddingsattention_maskpast_key_valuecache_positionr   c                 K   s  |j d d }g |d| jR }| |}	| |}
| |}| jjd urJ|	j| jj | jjd |
j| jj | jjd |j| jj | jjd |	|	dd}	|
|	dd}
||	dd}|\}}t
|	|
||\}	}
|d ur|||d}||
|| j|\}
}t}| jjdkr| jjdkr|dd	rtd
 nt| jj }|| |	|
||f| jsdn| j| jd|\}}|jg |dR   }| |}||fS )N)minmaxr   r	   )r6   r5   r=   eagerZsdpaZoutput_attentionsFz`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.        )Zdropoutscaling)shapeZhead_dimZq_projZk_projZv_projr2   Zclip_qkvZclamp_view	transposer8   update	layer_idxr   Z_attn_implementationgetloggerZwarning_oncer   ZtrainingZattention_dropoutrC   Zreshape
contiguousZo_proj)r   r    r:   r;   r<   r=   kwargsZinput_shapeZhidden_shapeZquery_statesZ
key_statesZvalue_statesr5   r6   Zcache_kwargsZattention_interfaceZattn_outputZattn_weightsr   r   r   r%   O   sN   	




zOlmoAttention.forward)NN)
r&   r'   r(   r$   r+   r   r   r   Z
LongTensorr%   r   r   r   r   r9   N   s     r9   c                       s&   e Zd Zdedef fddZ  ZS )OlmoDecoderLayerr2   rH   c                    s8   t  || t|j| _t|j| _t||d| _d S )N)r2   rH   )r   r   r   r   Zinput_layernormZpost_attention_layernormr9   Z	self_attn)r   r2   rH   r   r   r   r      s   zOlmoDecoderLayer.__init__)r&   r'   r(   r   r*   r   r,   r   r   r   r   rM      s    rM   c                   @      e Zd Zdd ZdS )OlmoRotaryEmbeddingc           
      C   s   | j d d d d f  |jd dd|j}|d d d d d f  }t|jjtr6|jjdkr6|jjnd}t	j
|dd/ | |  dd}t	j||fdd	}| | j }| | j }	||	fW  d    S 1 sqw   Y  d S )
Nr   r>   r   ZmpscpuF)device_typeenabledr	   )dim)Zinv_freqfloatexpandrD   r#   Zdevice
isinstancetypestrr$   ZautocastrF   catr5   Zattention_scalingr6   )
r   xr7   Zinv_freq_expandedZposition_ids_expandedrQ   ZfreqsZembr5   r6   r   r   r   r%      s   0&$zOlmoRotaryEmbedding.forwardN)r&   r'   r(   r%   r   r   r   r   rO          rO   c                   @   rN   )OlmoPreTrainedModelc                 C   s   | j j}t|tjr"|jjjd|d |jd ur |jj	  d S d S t|tj
rA|jjjd|d |jd urC|jj|j 	  d S d S d S )NrB   )meanstd)r2   Zinitializer_rangerV   r/   r0   weightdataZnormal_r.   Zzero_Z	EmbeddingZpadding_idx)r   moduler^   r   r   r   _init_weights   s   

z!OlmoPreTrainedModel._init_weightsN)r&   r'   r(   rb   r   r   r   r   r\      r[   r\   c                       s"   e Zd Zdef fddZ  ZS )	OlmoModelr2   c                    s<   t    t fddt jD | _t j| _	d S )Nc                    s   g | ]}t  |qS r   )rM   ).0rH   r2   r   r   
<listcomp>   s    z&OlmoModel.__init__.<locals>.<listcomp>)
r   r   r/   Z
ModuleListrangeZnum_hidden_layersZlayersr   r   Znormr1   r   re   r   r      s
   zOlmoModel.__init__)r&   r'   r(   r   r   r,   r   r   r   r   rc      s    rc   c                   @   s   e Zd ZdS )OlmoForCausalLMN)r&   r'   r(   r   r   r   r   rh      s    rh   )rh   rc   r\   )Nr   )+typingr   r   r   r$   Ztorch.nnr/   Ztorch.nn.functionalZ