from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...pytorch_utils import ALL_LAYERNORM_LAYERS
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from .configuration_granitemoe import GraniteMoeConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


def load_balancing_loss_func(
    gate_logits: Union[torch.Tensor, Tuple[torch.Tensor], None],
    num_experts: Optional[int] = None,
    top_k=2,
    attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
    r"""
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits:
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        num_experts:
            Number of experts
        top_k:
            The number of experts to route per-token, can be also interpreted as the `top-k` routing
            parameter.
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in forward function
            shape [batch_size X sequence_length] if not None.

    Returns:
        The auxiliary loss.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    if isinstance(gate_logits, tuple):
        compute_device = gate_logits[0].device
        concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)

    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Compute the mask that masks all padding tokens as 0 with the same shape as expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(compute_device)
        )

        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Compute the mask that masks all padding tokens as 0 with the same shape as the router probabilities
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts


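# Usage sketch (illustrative, not exercised at import time): the router logits collected from every
# decoder layer are concatenated and scored against the top-k routing decision, e.g. assuming 8 experts
# and top-2 routing:
#
#   gate_logits = tuple(torch.randn(batch_size * seq_len, 8) for _ in range(num_hidden_layers))
#   aux_loss = load_balancing_loss_func(gate_logits, num_experts=8, top_k=2)
#
# `GraniteMoeForCausalLM.forward` below performs this call when `output_router_logits=True` and adds
# `router_aux_loss_coef * aux_loss` to the language-modeling loss.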
class GraniteMoeRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        GraniteMoeRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


ALL_LAYERNORM_LAYERS.append(GraniteMoeRMSNorm)


class GraniteMoeRotaryEmbedding(nn.Module):
    def __init__(self, config: GraniteMoeConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


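# Usage sketch (illustrative): `GraniteMoeAttention.forward` below applies this rotation to query/key
# states of shape [batch, num_heads, seq_len, head_dim] using the (cos, sin) pair produced by
# `GraniteMoeRotaryEmbedding`, roughly:
#
#   cos, sin = rotary_emb(hidden_states, position_ids)       # each of shape [batch, seq_len, head_dim]
#   query, key = apply_rotary_pos_emb(query, key, cos, sin)  # default unsqueeze_dim=1 matches the head axis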
class GraniteMoeParallelExperts(nn.Module):
    def __init__(self, num_experts: int, input_size: int, output_size: int) -> None:
        """
        Initialize the GraniteMoeParallelExperts module.
        The experts weights are stored in [num_experts, output_size, input_size] format. Such that it's compatible with
        many MoE libraries, such as [Megablock](https://github.com/databricks/megablocks) and
        [ScatterMoE](https://github.com/shawntan/scattermoe), as well as the
        [MoE kernel](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/fused_moe/fused_moe.py)
        used in vllm.

        Args:
            num_experts (int):
                Number of experts.
            input_size (int):
                Size of the input.
            output_size (int):
                Size of the output.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.empty(num_experts, output_size, input_size))
        self.num_experts = num_experts
        self.input_size = input_size
        self.output_size = output_size

    def forward(self, inputs, expert_size):
        """
        Forward pass of the GraniteMoeParallelExperts module.

        Args:
            inputs (Tensor):
                Input tensor.
            expert_size:
                Expert size information.

        Returns:
            Tensor: Output tensor.
        """
        input_list = inputs.split(expert_size, dim=0)
        output_list = []
        for i in range(self.num_experts):
            output_list.append(F.linear(input_list[i], self.weight[i]))
        results = torch.cat(output_list, dim=0)
        return results


class GraniteMoeTopKGating(nn.Module):
    def __init__(self, input_size: int, num_experts: int, top_k: int):
        """
        Initialize the top-k gating mechanism.
        Args:
            input_size (`int`):
                Size of the input.
            num_experts (`int`):
                Number of experts.
            top_k (`int`):
                Number of top experts to select.
        """
        super().__init__()

        self.num_experts = num_experts
        self.input_size = input_size
        self.top_k = top_k

        self.layer = nn.Linear(input_size, num_experts, bias=False)

    def forward(self, hidden_states):
        # compute the top_k routing decision
        logits = self.layer(hidden_states).float()  # [num_tokens, num_experts]
        top_k_logits, top_k_indices = logits.topk(self.top_k, dim=1)  # [num_tokens, top_k]
        top_k_gates = torch.softmax(top_k_logits, dim=1).type_as(hidden_states)  # [num_tokens, top_k]

        # compute the number of inputs given to each expert
        zeros = torch.zeros(
            [top_k_gates.size(0), self.num_experts], dtype=top_k_gates.dtype, device=top_k_gates.device
        )  # [num_tokens, num_experts]
        gates = zeros.scatter(1, top_k_indices, 1)  # [num_tokens, num_experts]
        expert_size = gates.long().sum(0)  # [num_experts,]
        expert_size = expert_size.tolist()

        # sort and group input tokens according to expert assignment
        top_k_experts = top_k_indices.flatten()  # [num_tokens * top_k]
        _, index_sorted_experts = top_k_experts.sort(0)  # [num_tokens * top_k]
        batch_index = index_sorted_experts.div(self.top_k, rounding_mode="trunc")  # [num_tokens * top_k]

        # gather the gate values for grouped input tokens
        top_k_gates = top_k_gates.flatten()  # [num_tokens * top_k]
        batch_gates = top_k_gates[index_sorted_experts]  # [num_tokens * top_k]

        return index_sorted_experts, batch_index, batch_gates, expert_size, logits


class GraniteMoeMoE(nn.Module):
    """
    A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.

    Args:
        config:
            Configuration object with model hyperparameters.
    """

    def __init__(self, config: GraniteMoeConfig):
        super(GraniteMoeMoE, self).__init__()

        self.input_size = config.hidden_size
        self.hidden_size = config.intermediate_size
        self.activation = ACT2FN[config.hidden_act]
        self.input_linear = GraniteMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)
        self.output_linear = GraniteMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)

        self.router = GraniteMoeTopKGating(
            input_size=self.input_size,
            num_experts=config.num_local_experts,
            top_k=config.num_experts_per_tok,
        )

    def forward(self, layer_input):
        """
        Forward pass of the mixture of experts layer.

        Args:
            layer_input (Tensor):
                Input tensor.

        Returns:
            Tensor:
                Output tensor.
            Tensor:
                Router logits.
        """
        bsz, length, emb_size = layer_input.size()
        layer_input = layer_input.reshape(-1, emb_size)
        _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)

        expert_inputs = layer_input[batch_index]
        hidden_states = self.input_linear(expert_inputs, expert_size)
        chunked_hidden_states = hidden_states.chunk(2, dim=-1)
        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]
        expert_outputs = self.output_linear(hidden_states, expert_size)

        expert_outputs = expert_outputs * batch_gates[:, None]

        zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)
        layer_output = zeros.index_add(0, batch_index, expert_outputs)
        layer_output = layer_output.view(bsz, length, self.input_size)
        return layer_output, router_logits


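# Shape sketch (illustrative): for `hidden_states` of shape [batch, seq_len, hidden_size],
#
#   moe = GraniteMoeMoE(config)
#   layer_output, router_logits = moe(hidden_states)
#
# returns `layer_output` with the same [batch, seq_len, hidden_size] shape and `router_logits` of shape
# [batch * seq_len, num_local_experts]; tokens are flattened, routed to their top-k experts, passed
# through the gated feed-forward experts, rescaled by the gate values and scattered back via `index_add`.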
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


class GraniteMoeAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GraniteMoeConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.is_causal = True

        self.scaling = config.attention_multiplier

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = position_embeddings if position_embeddings is not None else (None, None)
        if position_embeddings is not None:
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1)
        attn_output = self.o_proj(attn_output)

        return attn_output, attn_weights, past_key_value


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class GraniteMoeDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GraniteMoeConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = GraniteMoeAttention(config=config, layer_idx=layer_idx)
        self.block_sparse_moe = GraniteMoeMoE(config)
        self.input_layernorm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.residual_multiplier = config.residual_multiplier

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        output_router_logits: Optional[bool] = False,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            output_router_logits (`bool`, *optional*):
                Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
                should not be returned during inference.
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states * self.residual_multiplier

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states, router_logits = self.block_sparse_moe(hidden_states)
        hidden_states = residual + hidden_states * self.residual_multiplier

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        if output_router_logits:
            outputs += (router_logits,)

        return outputs


@auto_docstring
class GraniteMoePreTrainedModel(PreTrainedModel):
    config_class = GraniteMoeConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GraniteMoeDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, GraniteMoeRMSNorm):
            module.weight.data.fill_(1.0)
        elif isinstance(module, GraniteMoeParallelExperts):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)


@auto_docstring
class GraniteMoeModel(GraniteMoePreTrainedModel):
    def __init__(self, config: GraniteMoeConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GraniteMoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False

        self.embedding_multiplier = config.embedding_multiplier
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta

        # rope
        self.position_embedding_type = config.position_embedding_type
        self.rotary_emb = GraniteMoeRotaryEmbedding(config) if self.position_embedding_type == "rope" else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, MoeModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        inputs_embeds = inputs_embeds * self.embedding_multiplier

        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):  # kept for BC (non `Cache` `past_key_values` inputs)
            return_legacy_cache = True
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            logger.warning_once(
                "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be "
                "removed in v4.43. Please use an appropriate `Cache` class "
                "(https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)"
            )

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = None
        if self.rotary_emb is not None:
            position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_router_logits = () if output_router_logits else None
        next_decoder_cache = None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                output_router_logits=output_router_logits,
                position_embeddings=position_embeddings,
            )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

            if output_router_logits:
                all_router_logits += (layer_outputs[-1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            next_cache = next_cache.to_legacy_cache()

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            router_logits=all_router_logits,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class GraniteMoeForCausalLM(GraniteMoePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: GraniteMoeConfig):
        super().__init__(config)
        self.model = GraniteMoeModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.num_local_experts
        self.num_experts_per_tok = config.num_experts_per_tok

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteMoeForCausalLM

        >>> model = GraniteMoeForCausalLM.from_pretrained("ibm/PowerMoE-3b")
        >>> tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_router_logits=output_router_logits,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits / self.config.logits_scaling

        loss = None
        if labels is not None:
            # Upcast to float if we need to compute the loss to avoid potential precision issues
            logits = logits.float()
            loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)

        aux_loss = None
        if output_router_logits:
            aux_loss = load_balancing_loss_func(
                outputs.router_logits if return_dict else outputs[-1],
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device

        if not return_dict:
            output = (logits,) + outputs[1:]
            if output_router_logits:
                output = (aux_loss,) + output
            return (loss,) + output if loss is not None else output

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past


__all__ = ["GraniteMoeForCausalLM", "GraniteMoeModel", "GraniteMoePreTrainedModel"]
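

# Usage sketch (illustrative): besides `from_pretrained`, the exported classes can be built from a bare
# config with randomly initialized weights, e.g.:
#
#   from transformers import GraniteMoeConfig
#
#   config = GraniteMoeConfig(num_local_experts=8, num_experts_per_tok=2)
#   model = GraniteMoeForCausalLM(config)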