
from typing import Callable, Optional, Tuple, Union

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from .configuration_glm4 import Glm4Config


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class Glm4MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # Gate and up projections are fused into one matmul and split with chunk() in forward.
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)
        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)
        return self.down_proj(up_states)


class Glm4DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Glm4Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Glm4Attention(config=config, layer_idx=layer_idx)
        self.mlp = Glm4MLP(config)
        self.input_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # GLM-4 adds two extra norms on the sub-layer outputs, applied before the residual additions.
        self.post_self_attn_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_mlp_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = self.post_self_attn_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_mlp_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
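

# Illustrative sketch (not part of the library): with 2 KV heads shared by 8
# query heads (n_rep=4), repeat_kv expands a (batch=1, kv_heads=2, seq=3,
# head_dim=8) tensor to (1, 8, 3, 8), so grouped-query KV states line up with
# the query heads without a copy per attention call:
#
#     kv = torch.randn(1, 2, 3, 8)
#     assert repeat_kv(kv, 4).shape == (1, 8, 3, 8)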


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
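

# Worked sketch (illustrative only, assuming toy shapes): the eager path is
# softmax(QK^T * scaling + mask) @ V, computed in float32. The `module`
# argument only needs `num_key_value_groups` and `training`, so a stand-in
# namespace is enough to exercise the function in isolation:
#
#     from types import SimpleNamespace
#
#     toy = SimpleNamespace(num_key_value_groups=1, training=False)
#     q = torch.randn(1, 2, 4, 8)  # (batch, heads, seq, head_dim)
#     k = torch.randn(1, 2, 4, 8)
#     v = torch.randn(1, 2, 4, 8)
#     out, w = eager_attention_forward(toy, q, k, v, None, scaling=8**-0.5)
#     # out: (1, 4, 2, 8) after the final transpose; w rows sum to 1 over dim=-1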


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., 0::2]
    x2 = x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # Interleave the cos/sin tables instead of the usual contiguous-halves layout
    cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
    sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)

    # Keep only the rotary portion; the remaining features pass through unchanged
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    # Apply rotary embeddings on the rotary portion
    q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)

    # Concatenate back to full shape
    q_embed = torch.cat([q_embed, q_pass], dim=-1)
    k_embed = torch.cat([k_embed, k_pass], dim=-1)
    return q_embed, k_embed
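

# Illustrative sketch (not part of the library): GLM-4 uses a *partial*,
# interleaved RoPE. Only the first `rotary_dim = cos.shape[-1]` features of
# each head are rotated and the rest pass through in `q_pass`/`k_pass`, while
# `rotate_half` pairs even/odd features rather than splitting the head into
# two contiguous halves. With head_dim = 8 and a cos/sin width of 4 (toy
# values, assuming a 0.5 partial rotary factor):
#
#     q = torch.randn(1, 2, 4, 8)  # (batch, heads, seq, head_dim)
#     k = torch.randn(1, 2, 4, 8)
#     cos = torch.ones(1, 4, 4)    # (batch, seq, rotary_dim)
#     sin = torch.zeros(1, 4, 4)
#     q2, k2 = apply_rotary_pos_emb(q, k, cos, sin)
#     assert torch.equal(q2, q)    # cos=1, sin=0 is the identity rotation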


class Glm4Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Glm4Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@use_kernel_forward_from_hub("RMSNorm")
class Glm4RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Glm4RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
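

# Equivalence sketch (illustrative only): Glm4RMSNorm computes
# y = w * x / sqrt(mean(x**2, dim=-1) + eps), i.e. RMS normalization without
# the mean-centering (and bias) of LayerNorm, always in float32:
#
#     x = torch.randn(2, 5, 16)
#     norm = Glm4RMSNorm(16, eps=1e-6)
#     ref = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
#     assert torch.allclose(norm(x), ref * norm.weight, atol=1e-5)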
N)r)   r*   r,   	ParameterrI   onesweightvariance_epsilon)r5   r.   rT   r6   s      r7   r*   Glm4RMSNorm.__init__+  s/     	ll5::k#:; #r9   c                    UR                   nUR                  [        R                  5      nUR	                  S5      R                  SSS9nU[        R                  " X0R                  -   5      -  nU R                  UR                  U5      -  $ )Nr&   r=   T)keepdim)	r   r   rI   r   powmeanrsqrtr   r   )r5   r:   input_dtypevariances       r7   rC   Glm4RMSNorm.forward3  sw    #))%((7 $$Q',,R,>%H?T?T4T(UU{{]--k:::r9   c                 ^    [        U R                  R                  5       SU R                   3$ )Nz, eps=)tupler   rt   r   r5   s    r7   
extra_reprGlm4RMSNorm.extra_repr:  s*    ))*+6$2G2G1HIIr9   )r   r   )gư>)	rE   rF   rG   rH   r*   rC   r   rK   rL   rM   s   @r7   rX   rX   )  s    $;J Jr9   rX   c                   l   ^  \ rS rSrSS\4U 4S jjjr\R                  " 5       \S 5       5       r	Sr
U =r$ )Glm4RotaryEmbeddingi>  r+   c                   > [         TU ]  5         [        US5      (       aH  UR                  b;  UR                  R	                  SUR                  R	                  S5      5      U l        OSU l        UR                  U l        UR                  U l        Xl	        [        U R
                     U l        U R                  U R                  U5      u  o0l        U R                  SUSS9  U R                  U l        g )Nrope_scaling	rope_typetypedefaultinv_freqF)
persistent)r)   r*   hasattrr   r   r   max_position_embeddingsmax_seq_len_cachedoriginal_max_seq_lenr+   r   rope_init_fnattention_scalingregister_bufferr   original_inv_freq)r5   r+   devicer   r6   s       r7   r*   Glm4RotaryEmbedding.__init__?  s    6>**v/B/B/N#0044[&BUBUBYBYZ`BabDN&DN"("@"@$*$B$B!/?+/+<+<T[[&+Q((ZeD!%r9   c                 b   U R                   S S S 2S 4   R                  5       R                  UR                  S   SS5      R	                  UR
                  5      nUS S 2S S S 24   R                  5       n[        UR
                  R                  [        5      (       a0  UR
                  R                  S:w  a  UR
                  R                  OSn[        R                  " USS9   UR                  5       UR                  5       -  R                  SS5      n[        R                  " Xf4SS	9nUR                  5       U R                  -  nUR                  5       U R                  -  n	S S S 5        WR	                  UR                   S
9W	R	                  UR                   S
94$ ! , (       d  f       N@= f)Nr   r=   r   mpscpuF)device_typeenabledr&   r>   )r   )r   floatru   rt   r   r   
isinstancer   strrI   autocastr   r   r   r   r   r   )
r5   r   ra   inv_freq_expandedposition_ids_expandedr   freqsembr   r   s
             r7   rC   Glm4RotaryEmbedding.forwardP  sR    !MM$4-8>>@GGHZHZ[\H]_acdehhijiqiqr ,QaZ 8 > > @'1!((--'E'E!((--[`J`ahhmmfk^^UC&,,.1F1L1L1NNYYZ[]^_E))UN3C'')d444C'')d444C	 D vvAGGv$cff177f&;;; DCs   $BF  
F.)r   r+   r   r   r   r   r   r   )rE   rF   rG   rH   r   r*   rI   no_gradr   rC   rK   rL   rM   s   @r7   r   r   >  s6    /z / /" ]]_<  <r9   r   c                   N    \ rS rSr\rSrSrS/rS/r	Sr
SrSrSrSrSrSrS rSrg)	Glm4PreTrainedModeli`  modelTrO   past_key_valuesc                    U R                   R                  n[        U[        R                  5      (       aW  UR
                  R                  R                  SUS9  UR                  b%  UR                  R                  R                  5         g g [        U[        R                  5      (       ad  UR
                  R                  R                  SUS9  UR                  b2  UR
                  R                  UR                     R                  5         g g [        U[        5      (       a&  UR
                  R                  R                  S5        g g )Nr   )r   stdg      ?)r+   initializer_ranger   r,   r-   r   datanormal_r(   zero_	Embeddingpadding_idxrX   fill_)r5   r|   r
  s      r7   _init_weights!Glm4PreTrainedModel._init_weightso  s    kk++fbii((MM&&CS&9{{&  &&( '--MM&&CS&9!!-""6#5#56<<> .,,MM$$S) -r9   ri   N)rE   rF   rG   rH   r   config_classbase_model_prefixsupports_gradient_checkpointing_no_split_modules_skip_keys_device_placement_supports_flash_attn_2_supports_sdpa_supports_flex_attn_supports_cache_class_supports_quantized_cache_supports_static_cache_supports_attention_backendr  rK   ri   r9   r7   r  r  `  sS    L&*#+,#4"5!N  $!"&*r9   r  c                     ^  \ rS rSrS\4U 4S jjrS rS r\\	         SS\
\R                     S\
\R                     S\
\R                     S	\
\   S
\
\R                     S\
\   S\
\   S\
\   S\
\R                     S\\   S\4S jj5       5       r SS\\R                  S4   S\R                  S\R                  S	\S\4
S jjr\S\R                  S\S\S\R2                  S\R                  S\4S j5       rSrU =r$ )	Glm4Modeli}  r+   c           	        > [         TU ]  U5        UR                  U l        UR                  U l        [
        R                  " UR                  UR                  U R                  5      U l        [
        R                  " [        UR                  5       Vs/ s H  n[        X5      PM     sn5      U l        [        UR                  UR                  S9U l        [#        US9U l        SU l        U R)                  5         g s  snf )NrS   )r+   F)r)   r*   pad_token_idr  
vocab_sizer,   r  r.   embed_tokens
ModuleListrangenum_hidden_layersrO   layersrX   rY   normr   
rotary_embgradient_checkpointing	post_initr^   s      r7   r*   Glm4Model.__init__  s     !.. ++LL):):F<N<NPTP`P`ammBGH`H`BabBaYf0Bab
   2 28K8KL	-V<&+# 	 cs   C?c                     U R                   $ r   r%  r   s    r7   get_input_embeddingsGlm4Model.get_input_embeddings  s       r9   c                     Xl         g r   r0  r5   r   s     r7   set_input_embeddingsGlm4Model.set_input_embeddings  s    !r9   	input_idsr`   ra   r  inputs_embedsrd   rc   output_hidden_statesre   flash_attn_kwargsr;   c
                 J   Ub  UOU R                   R                  nUb  UOU R                   R                  nUb  UOU R                   R                  nUS L US L-  (       a  [	        S5      eU R
                  (       a/  U R                  (       a  U(       a  [        R                  S5        Sn[        U[        S 5      [        45      (       d  [	        S5      eUc  U R                  U5      nU(       a  Uc
  [        5       nU	cD  Ub  UR                  5       OSn[        R                   " XUR"                  S   -   UR$                  S9n	Uc  U	R'                  S5      nU R)                  X%XU5      nUnU R+                  X5      nU(       a  SOS nU(       a  SOS nU R,                  S U R                   R.                    H7  nU(       a  X4-  nU" U4UUUUUU	US	.U
D6nUS   nU(       d  M.  UUS   4-  nM9     U R1                  U5      nU(       a  X4-  n[3        UU(       a  UOS UUS
9$ )Nz:You must specify exactly one of input_ids or inputs_embedszX`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.FzBThe `past_key_values` should be either a `Cache` object or `None`.r   r   r   ri   )r`   ra   rb   rc   rd   re   rf   )last_hidden_stater  r:   
attentions)r+   rc   r9  rd   
ValueErrorr,  r   r   r   r   r   r	   r%  r
   get_seq_lengthrI   arangert   r   r   _update_causal_maskr+  r)  r(  r*  r   )r5   r7  r`   ra   r  r8  rd   rc   r9  re   r:  past_seen_tokensr   r:   rf   all_hidden_statesall_self_attnsdecoder_layerlayer_outputss                      r7   rC   Glm4Model.forward  sI    2C1N-TXT_T_TqTq$8$D $++JjJj 	 "+!6IDKK<Q<Q	-t";<YZZ&&4==Yj I /DJ+>??abb  --i8M0*nO!CRC^==?de"\\ ]5H5H5K"KTaThThN )33A6L..>L]
 & #oomJ #7BD0d![[)H4;;+H+HIM#!%55!)
*)."3#-$7
 $
M *!,M  =#3"55' J* 		-0  !11&+/8Od+%	
 	
r9   r    input_tensorc           	         U R                   R                  S:X  a  Ub  US:H  R                  5       (       a  U$ g U R                   R                  S:X  a,  [        U[        R
                  5      (       a  [        U5      nU$ Ub  UR                  5       OSnUb  UR                  OSnU R                   R                  S:X  a5  U(       d.  U(       d'  [        R                  " UUUU R                  S9(       a  g UR                  nUR                  S   n	U(       a  UR                  5       n
O5[        U[        R
                  5      (       a  UR                  S	   OXi-   S-   n
U R                  UU	U
UUUR                  S   S
9nU R                   R                  S:X  aZ  UbW  UR                   R"                  S;   a=  U(       d6  [        R$                  " U5      R&                  n[        R(                  " X5      nU$ )Nflash_attention_2r   flex_attentionr   Fr   )r8  past_key_values_lengthis_trainingr   r=   )sequence_lengthtarget_lengthr   re   
batch_size)cudaxpunpu)r+   r   anyr   rI   ro   r!   r@  is_compileabler   _ignore_causal_mask_sdpar   r   rt   get_max_cache_shape5_prepare_4d_causal_attention_mask_with_cache_positionr   r   finfomin_unmask_unattended)r5   r`   rI  re   r  rc   rC  using_compilable_cacher   rO  rP  r   	min_dtypes                r7   rB  Glm4Model._update_causal_mask  s    ;;++/BB)~/D.I.I.K.K%%;;++/??.%,,77!<^!L!!
 @O?Z?99;`aCRC^!?!?di ;;++v5>T]n%>>*'7 MM	 ""&,,Q/!+??AM nell;; $$R(%7!;  PP+')#))!, Q 
 KK,,6*%%**.DD%
 E*..I0CCK[Kr9   rO  rP  r   rQ  c                    U b  U R                  5       S:X  a  U nU$ [        R                  " U5      R                  n[        R                  " X4XUR
                  S9nUS:w  a  [        R                  " USS9nU[        R                  " X$R
                  S9UR                  SS5      :  -  nUSSSS2SS24   R                  USSS5      nU b  UR                  5       nU R                  S   n	USS2SS2SS2SU	24   U SS2SSSS24   R                  UR
                  5      -   n
U
S:H  n
USS2SS2SS2SU	24   R                  X5      USS2SS2SS2SU	24'   U$ )	a  
    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


@auto_docstring
class Glm4ForCausalLM(Glm4PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Glm4Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Glm4ForCausalLM

        >>> model = Glm4ForCausalLM.from_pretrained("THUDM/GLM-4-9B-Chat-0414")
        >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/GLM-4-9B-Chat-0414")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
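

# Usage sketch (illustrative only, continuing the names from the docstring
# example above): during decoding only the last position's logits are needed,
# so `logits_to_keep=1` (which `GenerationMixin` typically passes on its own)
# avoids materializing a full (batch, seq, vocab_size) tensor:
#
#     out = model(input_ids=inputs.input_ids, logits_to_keep=1)
#     next_token = out.logits[:, -1].argmax(dim=-1)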


@auto_docstring(
    custom_intro="""
    The Glm4 Model transformer with a sequence classification head on top (linear layer).

    [`Glm4ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class Glm4ForSequenceClassification(Glm4PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Glm4Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
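

# Pooling sketch (illustrative only): the classification head scores every
# position, then a single row per sequence is kept, namely the rightmost
# non-pad token. With a hypothetical pad_token_id of 0 and right padding:
#
#     input_ids = torch.tensor([[5, 6, 7, 0, 0]])
#     non_pad = (input_ids != 0).int()              # [[1, 1, 1, 0, 0]]
#     idx = (torch.arange(5) * non_pad).argmax(-1)  # tensor([2])
#
# so the logits at position 2 (the last real token) become the pooled logits.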


@auto_docstring
class Glm4ForTokenClassification(Glm4PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Glm4Model(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
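

# Usage sketch (illustrative only, with a hypothetical toy config): token
# classification emits one score per position, so for 3 labels the logits are
# (batch, seq, 3), and padded positions are usually ignored via -100 labels:
#
#     config = Glm4Config(
#         hidden_size=32, intermediate_size=64, num_hidden_layers=2,
#         num_attention_heads=4, num_key_value_heads=2, num_labels=3,
#     )
#     model = Glm4ForTokenClassification(config)
#     out = model(input_ids=torch.tensor([[1, 2, 3]]))
#     assert out.logits.shape == (1, 3, 3)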


__all__ = [
    "Glm4PreTrainedModel",
    "Glm4Model",
    "Glm4ForCausalLM",
    "Glm4ForSequenceClassification",
    "Glm4ForTokenClassification",
]