# PyTorch GLM-4 model.

from typing import Callable, Optional, Tuple, Union

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from .configuration_glm4 import Glm4Config


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class Glm4MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)
        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)
        return self.down_proj(up_states)


class Glm4DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Glm4Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = Glm4Attention(config=config, layer_idx=layer_idx)

        self.mlp = Glm4MLP(config)
        self.input_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_self_attn_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_mlp_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = self.post_self_attn_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        # Fully connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_mlp_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
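
# Illustrative sketch (not part of the upstream module): Glm4MLP only reads
# `hidden_size`, `intermediate_size` and `hidden_act` from its config, so a tiny
# stand-in namespace is enough to exercise the fused gate/up projection. The sizes
# and the `_demo_*` name are assumptions chosen only for demonstration.
def _demo_glm4_mlp():
    from types import SimpleNamespace

    config = SimpleNamespace(hidden_size=8, intermediate_size=16, hidden_act="silu")
    mlp = Glm4MLP(config)
    x = torch.randn(2, 3, config.hidden_size)
    out = mlp(x)  # gate_up_proj -> chunk into (gate, up) -> up * act(gate) -> down_proj
    assert out.shape == x.shape
    return out
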
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., 0::2]
    x2 = x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # Interleave the sin/cos tables pairwise instead of the usual split-in-half layout
    cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
    sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)

    # Keep only the first `rotary_dim` channels for rotation, pass the rest through
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    # Apply rotary embeddings on the rotated part
    q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)

    # Concatenate back to the full head dimension
    q_embed = torch.cat([q_embed, q_pass], dim=-1)
    k_embed = torch.cat([k_embed, k_pass], dim=-1)
    return q_embed, k_embed


class Glm4Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Glm4Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
@use_kernel_forward_from_hub("RMSNorm")
class Glm4RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Glm4RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class Glm4RotaryEmbedding(nn.Module):
    def __init__(self, config: Glm4Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class Glm4PreTrainedModel(PreTrainedModel):
    config_class = Glm4Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Glm4DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Glm4RMSNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class Glm4Model(Glm4PreTrainedModel):
    def __init__(self, config: Glm4Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Glm4DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Glm4RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, we rely on its `is_causal` argument instead of its `attn_mask` argument,
        # in order to dispatch on Flash Attention 2. This feature is not compatible with static cache.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output_attentions is True, the SDPA implementation falls back to the eager one anyway
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention_mask` is 2D, we generate a causal 4D mask here.
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows (e.g. the first rows when using left padding),
            # as required by the memory-efficient path of F.scaled_dot_product_attention.
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume the mask already comes in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


@auto_docstring
class Glm4ForCausalLM(Glm4PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Glm4Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Glm4ForCausalLM

        >>> model = Glm4ForCausalLM.from_pretrained("THUDM/GLM-4-9B-Chat-0414")
        >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/GLM-4-9B-Chat-0414")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@auto_docstring(
    custom_intro="""
    The Glm4 Model transformer with a sequence classification head on top (linear layer).

    [`Glm4ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class Glm4ForSequenceClassification(Glm4PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Glm4Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class Glm4ForTokenClassification(Glm4PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Glm4Model(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "Glm4PreTrainedModel",
    "Glm4Model",
    "Glm4ForCausalLM",
    "Glm4ForSequenceClassification",
    "Glm4ForTokenClassification",
]
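
# Illustrative sketches (not part of the upstream module, and deliberately not exported
# via __all__): small checks of Glm4RMSNorm, a miniature randomly initialized Glm4Model,
# and the last-non-padding-token pooling used by Glm4ForSequenceClassification. The
# miniature hyperparameters and `_demo_*` names are assumptions chosen only so the
# examples are cheap to run.
def _demo_rms_norm():
    norm = Glm4RMSNorm(4, eps=1e-6)
    x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
    # RMSNorm: x * rsqrt(mean(x^2) + eps), scaled by a learned weight that starts at 1.
    manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(norm(x), manual)
    return norm(x)


def _demo_tiny_glm4_model():
    config = Glm4Config(
        vocab_size=128,
        hidden_size=32,
        intermediate_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        head_dim=8,
        max_position_embeddings=64,
        pad_token_id=0,
    )
    model = Glm4Model(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 10))
    with torch.no_grad():
        outputs = model(input_ids=input_ids)
    assert outputs.last_hidden_state.shape == (1, 10, config.hidden_size)
    return outputs.last_hidden_state


def _demo_last_non_pad_token():
    # How the sequence-classification head picks one position per row: the rightmost
    # token that is not the padding token.
    pad_token_id = 0
    input_ids = torch.tensor([[5, 7, 9, 0, 0], [3, 4, 6, 8, 2]])  # row 0 is right-padded
    non_pad_mask = (input_ids != pad_token_id).to(torch.int32)
    token_indices = torch.arange(input_ids.shape[-1], dtype=torch.int32)
    last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
    assert last_non_pad_token.tolist() == [2, 4]
    return last_non_pad_token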