from typing import Callable, List, Optional, Tuple, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    LossKwargs,
    auto_docstring,
    can_return_tuple,
    is_torch_flex_attn_available,
    logging,
)
from .configuration_granite import GraniteConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class GraniteAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GraniteConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = config.attention_multiplier  # Granite uses a configurable scale instead of 1/sqrt(head_dim)
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


@use_kernel_forward_from_hub("RMSNorm")
class GraniteRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        GraniteRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class GraniteMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class GraniteDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GraniteConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = GraniteAttention(config=config, layer_idx=layer_idx)
        self.mlp = GraniteMLP(config)
        self.input_layernorm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.residual_multiplier = config.residual_multiplier  # Granite scales each block output before the residual add

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states * self.residual_multiplier

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states * self.residual_multiplier

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class GranitePreTrainedModel(PreTrainedModel):
    config_class = GraniteConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GraniteDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, GraniteRMSNorm):
            module.weight.data.fill_(1.0)


class GraniteRotaryEmbedding(nn.Module):
    def __init__(self, config: GraniteConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class GraniteModel(GranitePreTrainedModel):
    def __init__(self, config: GraniteConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GraniteDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GraniteRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        self.embedding_multiplier = config.embedding_multiplier  # Granite scales the token embeddings

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        inputs_embeds = inputs_embeds * self.embedding_multiplier

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, rely on its `is_causal` argument instead of its `attn_mask` argument, in order to
        # dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail to infer
        # the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output_attentions is True, the sdpa implementation falls back to the eager implementation
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided attention mask is 2D, generate the causal 4D mask here.
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows, e.g. the first rows when using left padding. This is
            # required by the memory-efficient path of F.scaled_dot_product_attention.
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class GraniteForCausalLM(GranitePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GraniteModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteForCausalLM

        >>> model = GraniteForCausalLM.from_pretrained("meta-granite/Granite-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-granite/Granite-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits / self.config.logits_scaling  # Granite divides the LM logits by a configurable scale

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["GraniteForCausalLM", "GraniteModel", "GranitePreTrainedModel"]