# coding=utf-8
"""PyTorch Mixtral model."""

from functools import partial
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import DynamicCache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...processing_utils import Unpack
from ...utils import LossKwargs, logging
from ..mistral.modeling_mistral import (
    MistralAttention,
    MistralForCausalLM,
    MistralForQuestionAnswering,
    MistralForSequenceClassification,
    MistralForTokenClassification,
    MistralModel,
    MistralPreTrainedModel,
    MistralRMSNorm,
    MistralRotaryEmbedding,
)
from .configuration_mixtral import MixtralConfig


logger = logging.get_logger(__name__)


def load_balancing_loss_func(
    gate_logits: Union[torch.Tensor, Tuple[torch.Tensor], None],
    num_experts: Optional[int] = None,
    top_k: int = 2,
    attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
    r"""
    Computes the auxiliary load balancing loss as in Switch Transformer - implemented in PyTorch.

    See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits:
            Logits from the `gate`, should be a tuple of `model.config.num_hidden_layers` tensors of
            shape `[batch_size * sequence_length, num_experts]`.
        num_experts:
            Number of experts.
        top_k:
            The number of experts routed to per token; can also be interpreted as the `top-k` routing
            parameter.
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in the forward function, of shape `[batch_size, sequence_length]` if not None.

    Returns:
        The auxiliary loss.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    if isinstance(gate_logits, tuple):
        compute_device = gate_logits[0].device
        concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)

    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Compute the mask that masks all padding tokens as 0, with the same shape as expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(compute_device)
        )

        # Compute the percentage of tokens routed to each expert, ignoring padding
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Compute the mask that masks all padding tokens as 0, with the same shape as tokens_per_expert
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average probability of routing to these experts, ignoring padding
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts
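# Illustrative usage sketch (an added example, not part of the upstream file).
# For near-uniform router logits, `tokens_per_expert` and `router_prob_per_expert`
# are both ~1/num_experts, so the loss approaches `top_k` (the final
# `* num_experts` scaling cancels one 1/num_experts factor):
#
#     num_layers, num_tokens, num_experts = 2, 512, 8
#     gate_logits = tuple(torch.randn(num_tokens, num_experts) for _ in range(num_layers))
#     aux_loss = load_balancing_loss_func(gate_logits, num_experts=num_experts, top_k=2)
#     # aux_loss is a scalar tensor, roughly 2.0 for balanced routing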
t|j | _d S )NFZbias)super__init__intermediate_sizeffn_dimhidden_size
hidden_dimr   Linearw1w2w3r	   Z
hidden_actact_fnselfrE   	__class__r!   r%   rH      s   
z"MixtralBlockSparseTop2MLP.__init__c                 C   s(   |  | || | }| |}|S N)rQ   rN   rP   rO   )rS   hidden_statescurrent_hidden_statesr!   r!   r%   forward   s   
z!MixtralBlockSparseTop2MLP.forward)__name__
__module____qualname__r   rH   rY   __classcell__r!   r!   rT   r%   rD      s    rD   c                       s6   e Zd ZdZ fddZdejdejfddZ  ZS )MixtralSparseMoeBlocka  
    This implementation is
    strictly equivalent to standard MoE with full capacity (no
    dropped tokens). It's faster since it formulates MoE operations
    in terms of block-sparse operations to accommodate imbalanced
    assignments of tokens to experts, whereas standard MoE either
    (1) drop tokens at the cost of reduced performance or (2) set
    capacity factor to number of experts and thus waste computation
    and memory on padding.
    c                    sl   t     j| _ j| _ j| _ j| _	t
j| j| jdd| _t
 fddt| jD | _ j| _d S )NFrF   c                    s   g | ]}t  qS r!   )rD   )r#   r=   rE   r!   r%   r&      s    z2MixtralSparseMoeBlock.__init__.<locals>.<listcomp>)rG   rH   rK   rL   rI   rJ   num_local_expertsr   num_experts_per_tokr;   r   rM   gate
ModuleListrangeexpertsZrouter_jitter_noisejitter_noiserR   rT   r_   r%   rH      s   
 zMixtralSparseMoeBlock.__init__rW   r    c                 C   sp  |j \}}}| jr| jdkr|t|d| j d| j 9 }|d|}| |}tj	|dtj
d}tj|| jdd\}}||jddd }||j}tj|| |f|j|jd	}tjjj|| jd
ddd}	|	jdddkjddd  }
|
D ]0}| j| }t|	| \}}|d|f d|}|||||df  }|d|||j q|||||}||fS ) r   g      ?r*   r   )r)   dtyper(   T)r)   Zkeepdim)rh   r-   )Znum_classesr   )r*   )as_tupleN)r6   trainingrf   r.   Z
empty_likeZuniform_viewrb   Fr1   r5   r2   r;   r9   r"   rh   Zzerosr-   r   r0   r3   r   ZpermuteZnonzerotolistre   wherer8   Z
index_add_)rS   rW   r@   rA   rL   router_logitsr<   r>   Zfinal_hidden_statesr?   Zexpert_hittedZ
expert_idxZexpert_layeridxZtop_xZcurrent_staterX   r!   r!   r%   rY      s,   "
 
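# Illustrative usage sketch (an added example, not part of the upstream file);
# the tiny hyperparameters below are arbitrary:
#
#     cfg = MixtralConfig(hidden_size=32, intermediate_size=64, num_local_experts=4, num_experts_per_tok=2)
#     moe = MixtralSparseMoeBlock(cfg)
#     out, router_logits = moe(torch.randn(1, 5, 32))
#     # out: (1, 5, 32); router_logits: (5, 4), i.e. (batch * seq_len, num_experts),
#     # which is the per-layer shape that load_balancing_loss_func expects.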
zMixtralSparseMoeBlock.forward)	rZ   r[   r\   __doc__rH   r.   TensorrY   r]   r!   r!   rT   r%   r^      s    r^   c                   @      e Zd ZdS )MixtralRMSNormNrZ   r[   r\   r!   r!   r!   r%   ru          ru   c                   @   rt   )MixtralAttentionNrv   r!   r!   r!   r%   rx      rw   rx   c                       s   e Zd Zdedef fddZ								ddejdeej d	eej	 d
ee
ej  dee dee dee deej	 dee
ejejf  dee de
ejee
ejejf  f fddZ  ZS )MixtralDecoderLayerrE   	layer_idxc                    sP   t    |j| _t||| _t|| _t|j|jd| _	t|j|jd| _
d S )N)Zeps)rG   rH   rK   rx   	self_attnr^   block_sparse_moeru   Zrms_norm_epsinput_layernormpost_attention_layernorm)rS   rE   rz   rT   r!   r%   rH      s   

zMixtralDecoderLayer.__init__NFrW   r   position_idspast_key_valueoutput_attentionsoutput_router_logits	use_cachecache_positionposition_embeddingskwargsr    c
                 K   s   |}|  |}| jd||	||||||d|
\}}|| }|}| |}| |\}}|| }|f}|r:||f7 }|rA||f7 }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_router_logits (`bool`, *optional*):
                Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
                should not be returned during inference.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        )rW   r   r   r   r   r   r   r   Nr!   )r}   r{   r~   r|   )rS   rW   r   r   r   r   r   r   r   r   r   ZresidualZself_attn_weightsrp   outputsr!   r!   r%   rY      s2   #
	



zMixtralDecoderLayer.forward)NNNFFFNN)rZ   r[   r\   r   intrH   r.   rs   r   
LongTensorr   boolr   r   FloatTensorrY   r]   r!   r!   rT   r%   ry      sB    	
ry   c                   @   rt   )MixtralRotaryEmbeddingNrv   r!   r!   r!   r%   r   5  rw   r   c                   @   s   e Zd ZdZdS )MixtralPreTrainedModelFN)rZ   r[   r\   Z_supports_static_cacher!   r!   r!   r%   r   9  s    r   c                       s   e Zd Zdef fddZ										ddeej deej deej dee	ej
  d	eej
 d
ee dee dee dee deej dee defddZ  ZS )MixtralModelrE   c                    s0   t    t fddt jD | _d S )Nc                    s   g | ]}t  |qS r!   )ry   )r#   rz   r_   r!   r%   r&   A  r'   z)MixtralModel.__init__.<locals>.<listcomp>)rG   rH   r   rc   rd   rB   layersrR   rT   r_   r%   rH   >  s   
zMixtralModel.__init__N	input_idsr   r   past_key_valuesinputs_embedsr   r   output_hidden_statesr   r   flash_attn_kwargsr    c                 K   s  |d ur|n| j j}|	d ur|	n| j j}	|d ur|n| j j}|d ur$|n| j j}|d u |d uA r4td| jrC| jrC|rCt	d d}|rL|d u rLt
 }|d u rU| |}|
d u rq|d ura| nd}tj|||jd  |jd}
|d u rz|
d}| |||
||}|}| ||}|rdnd }|rdnd }|	rdnd }| jD ]L}|r||f7 }| jr| jr| t|jfi |||||||	||
|
}n||f|||||	||
|d|}|d }|r||d f7 }|	r||d	 f7 }q| |}|r||f7 }t|||||d
S )Nz:You must specify exactly one of input_ids or inputs_embedszZ`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...Fr   r   )r-   r!   )r   r   r   r   r   r   r   r   r*   )last_hidden_stater   rW   
attentionsrp   )rE   r   r   r   r   
ValueErrorZgradient_checkpointingrk   loggerZwarning_oncer
   Zembed_tokensZget_seq_lengthr.   Zaranger6   r-   r:   Z_update_causal_maskZ
rotary_embr   Z_gradient_checkpointing_funcr   __call__Znormr   )rS   r   r   r   r   r   r   r   r   r   r   r   Zpast_seen_tokensZcausal_maskrW   r   Zall_hidden_statesZall_self_attnsZall_router_logitsZdecoder_layerZlayer_outputsr!   r!   r%   rY   D  s   







zMixtralModel.forward)
NNNNNNNNNN)rZ   r[   r\   r   rH   r   r.   r   rs   r   r   r   r   r   r   rY   r]   r!   r!   rT   r%   r   =  sJ    	
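# Illustrative usage sketch (an added example, not part of the upstream file).
# With `output_router_logits=True`, the model returns one router-logit tensor
# per decoder layer, ready to be fed to `load_balancing_loss_func`:
#
#     cfg = MixtralConfig(hidden_size=32, intermediate_size=64, num_hidden_layers=2,
#                         num_attention_heads=4, num_key_value_heads=2,
#                         num_local_experts=4, vocab_size=128)
#     model = MixtralModel(cfg)
#     out = model(torch.randint(0, 128, (1, 6)), output_router_logits=True)
#     # out.last_hidden_state: (1, 6, 32); len(out.router_logits) == 2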
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


class MixtralForCausalLM(MistralForCausalLM):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = MixtralModel(config)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.num_local_experts
        self.num_experts_per_tok = config.num_experts_per_tok

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> MoeCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MixtralForCausalLM

        >>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
        >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs: MoeModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_router_logits=output_router_logits,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)

        aux_loss = None
        if output_router_logits:
            aux_loss = load_balancing_loss_func(
                outputs.router_logits,
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                # make sure the auxiliary loss resides on the same device as the main loss
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )


class MixtralForSequenceClassification(MistralForSequenceClassification):
    pass


class MixtralForTokenClassification(MistralForTokenClassification):
    pass


class MixtralForQuestionAnswering(MistralForQuestionAnswering):
    pass


__all__ = [
    "MixtralForCausalLM",
    "MixtralForQuestionAnswering",
    "MixtralModel",
    "MixtralPreTrainedModel",
    "MixtralForSequenceClassification",
    "MixtralForTokenClassification",
]