from typing import Callable, Optional, Tuple, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    LossKwargs,
    auto_docstring,
    can_return_tuple,
    is_torch_flex_attn_available,
    logging,
)
from .configuration_bitnet import BitNetConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


@use_kernel_forward_from_hub("RMSNorm")
class BitNetRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        BitNetRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class BitNetMLP(nn.Module):
    def __init__(self, config: BitNetConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]
        self.ffn_sub_norm = BitNetRMSNorm(config.intermediate_size, eps=config.rms_norm_eps)

    def forward(self, x):
        down_proj = self.down_proj(self.ffn_sub_norm(self.act_fn(self.gate_proj(x)) * self.up_proj(x)))
        return down_proj


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class BitNetAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: BitNetConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.attn_sub_norm = BitNetRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.attn_sub_norm(attn_output)
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class BitNetDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: BitNetConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = BitNetAttention(config=config, layer_idx=layer_idx)

        self.mlp = BitNetMLP(config)
        self.input_layernorm = BitNetRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = BitNetRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


class BitNetRotaryEmbedding(nn.Module):
    def __init__(self, config: BitNetConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class BitNetPreTrainedModel(PreTrainedModel):
    config_class = BitNetConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["BitNetDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, BitNetRMSNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class BitNetModel(BitNetPreTrainedModel):
    def __init__(self, config: BitNetConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [BitNetDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = BitNetRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = BitNetRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, we rely on its `is_causal` argument instead of its `attn_mask` argument, in order
        # to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail to
        # infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output attentions is True, the sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention's memory-efficient attention path.
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class BitNetForCausalLM(BitNetPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = None
    _pp_plan = None

    def __init__(self, config):
        super().__init__(config)
        self.model = BitNetModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, BitNetForCausalLM

        >>> model = BitNetForCausalLM.from_pretrained("microsoft/bitnet-b1.58-2B-4T")
        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/bitnet-b1.58-2B-4T")

        >>> prompt = f'<|begin_of_text|>User: Hey, are you conscious? Can you talk to me?<|eot_id|>Assistant: '
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=100)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "User: Hey, are you conscious? Can you talk to me?Assistant: No, I'm not conscious. I'm an artificial intelligence designed to assist with information and tasks. How can I help you today?"
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["BitNetForCausalLM", "BitNetModel", "BitNetPreTrainedModel"]