
from typing import Callable, Optional, Tuple, Union

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from .configuration_glm import GlmConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class GlmMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config

        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)

        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)

        return self.down_proj(up_states)
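
# Editorial illustration, not part of the upstream module: a minimal sanity check of
# the fused gate/up projection in `GlmMLP` above. `_ToyConfig` and its sizes are
# assumptions made only for this sketch; the function is never called at import
# time, so run it by hand if you want to verify the shapes.
def _demo_glm_mlp_shapes() -> None:
    class _ToyConfig:
        hidden_size = 8
        intermediate_size = 16
        hidden_act = "silu"

    mlp = GlmMLP(_ToyConfig())
    x = torch.randn(2, 3, 8)  # (batch, seq_len, hidden_size)
    gate, up = mlp.gate_up_proj(x).chunk(2, dim=-1)  # one matmul yields both halves
    assert gate.shape == up.shape == (2, 3, 16)
    assert mlp(x).shape == x.shape  # down_proj maps back to hidden_size
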

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., 0::2]
    x2 = x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)

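# Editorial illustration, not upstream code: grouped-query attention in miniature.
# `repeat_kv` broadcasts each K/V head across its query group, and
# `eager_attention_forward` consumes the expanded tensors. All sizes here are
# assumptions picked for the demo, and `types.SimpleNamespace` stands in for the
# attention module, which only needs `num_key_value_groups` and `training`.
def _demo_grouped_query_attention() -> None:
    import types

    batch, kv_heads, n_rep, seq, head_dim = 2, 2, 3, 5, 8
    key = torch.randn(batch, kv_heads, seq, head_dim)
    value = torch.randn(batch, kv_heads, seq, head_dim)
    query = torch.randn(batch, kv_heads * n_rep, seq, head_dim)

    expanded = repeat_kv(key, n_rep)
    assert expanded.shape == (batch, kv_heads * n_rep, seq, head_dim)
    assert torch.equal(expanded[:, 0], expanded[:, n_rep - 1])  # one group shares a K head

    module = types.SimpleNamespace(num_key_value_groups=n_rep, training=False)
    attn_output, attn_weights = eager_attention_forward(
        module, query, key, value, attention_mask=None, scaling=head_dim**-0.5
    )
    assert attn_output.shape == (batch, seq, kv_heads * n_rep, head_dim)  # heads moved to dim 2
    assert attn_weights.shape == (batch, kv_heads * n_rep, seq, seq)
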

def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # Interleave them instead of usual shape
    cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
    sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)

    # Keep half or full tensor for later concatenation
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    # Apply rotary embeddings on the first half or full tensor
    q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)

    # Concatenate back to full shape
    q_embed = torch.cat([q_embed, q_pass], dim=-1)
    k_embed = torch.cat([k_embed, k_pass], dim=-1)
    return q_embed, k_embed


class GlmAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GlmConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


@use_kernel_forward_from_hub("RMSNorm")
class GlmRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        GlmRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class GlmRotaryEmbedding(nn.Module):
    def __init__(self, config: GlmConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class GlmDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GlmConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = GlmAttention(config=config, layer_idx=layer_idx)

        self.mlp = GlmMLP(config)
        self.input_layernorm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class GlmPreTrainedModel(PreTrainedModel):
    config_class = GlmConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GlmDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, GlmRMSNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class GlmModel(GlmPreTrainedModel):
    def __init__(self, config: GlmConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GlmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GlmRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if (
            self.config._attn_implementation == "sdpa"
            and not using_compilable_cache
            and not output_attentions
            and AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            )
        ):
            return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

Args:
    attention_mask (`torch.Tensor`):
        A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
        `(batch_size, 1, query_length, key_value_length)`.
    sequence_length (`int`):
        The sequence length being processed.
    target_length (`int`):
        The target length: when generating with static cache, the mask should be as long as the static cache,
        to account for the 0 padding, the part of the cache that is not filled yet.
    dtype (`torch.dtype`):
        The dtype to use for the 4D attention mask.
    cache_position (`torch.Tensor`):
        Indices depicting the position of the input sequence tokens in the sequence.
    batch_size (`torch.Tensor`):
            Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class GlmForCausalLM(GlmPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GlmModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GlmForCausalLM

        >>> model = GlmForCausalLM.from_pretrained("meta-glm/Glm-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-glm/Glm-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Glm Model transformer with a sequence classification head on top (linear layer).

    [`GlmForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class GlmForSequenceClassification(GlmPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = GlmModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class GlmForTokenClassification(GlmPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = GlmModel(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "GlmPreTrainedModel",
    "GlmModel",
    "GlmForCausalLM",
    "GlmForSequenceClassification",
    "GlmForTokenClassification",
]