"""
PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
    N)DictListOptionalSetTupleUnion)nn)BCEWithLogitsLossCrossEntropyLossMSELoss   )get_activation)PretrainedConfig)is_deepspeed_zero3_enabled)#_prepare_4d_attention_mask_for_sdpa)!flash_attn_supports_top_left_maskis_flash_attn_available)BaseModelOutputMaskedLMOutputMultipleChoiceModelOutputQuestionAnsweringModelOutputSequenceClassifierOutputTokenClassifierOutput)PreTrainedModel)apply_chunking_to_forward find_pruneable_heads_and_indices"is_torch_greater_or_equal_than_2_2prune_linear_layer)auto_docstringlogging   )DistilBertConfig)_flash_attention_forwardn_posdimoutc                     [        5       (       aT  SS KnUR                  R                  USS9   [        R
                  R                  5       S:X  a
  [        XUS9  S S S 5        g [        XUS9  g ! , (       d  f       g = f)Nr   )modifier_rankr$   r%   r&   )r   	deepspeedzeroGatheredParameterstorchdistributedget_rank_create_sinusoidal_embeddings)r$   r%   r&   r*   s       j/var/www/auris/envauris/lib/python3.13/site-packages/transformers/models/distilbert/modeling_distilbert.pycreate_sinusoidal_embeddingsr2   B   se    !##^^..s!.D  ))+q0-EL ED 	&ED	 EDs   -A//
A=c                    [         R                  " [        U 5       VVs/ s H?  n[        U5       Vs/ s H%  oC[         R                  " SSUS-  -  U-  5      -  PM'     snPMA     snn5      nSUl        [
        R                  " [         R                  " US S 2SS S24   5      5      US S 2SS S24'   [
        R                  " [         R                  " US S 2SS S24   5      5      US S 2SS S24'   UR                  5         g s  snf s  snnf )Ni'     Fr   r!   )
nparrayrangepowerrequires_gradr-   FloatTensorsincosdetach_)r$   r%   r&   posjposition_encs         r1   r0   r0   M   s    88hmnshtuhtadQVWZQ[\Q[ABHHUAaL34F$GGQ[\htuvLC$$RVVLADqD,A%BCC14a4L$$RVVLADqD,A%BCC14a4LKKM	 ]us   D
,DD
D
c                      ^  \ rS rSrS\4U 4S jjrS	S\R                  S\\R                     S\R                  4S jjr	Sr
U =r$ )

EmbeddingsU   configc                   > [         TU ]  5         [        R                  " UR                  UR
                  UR                  S9U l        [        R                  " UR                  UR
                  5      U l	        [        R                  " UR
                  SS9U l
        [        R                  " UR                  5      U l        U R                  S[        R                  " UR                  5      R!                  S5      SS9  g )N)padding_idx-q=epsposition_ids)r!   F)
persistent)super__init__r	   	Embedding
vocab_sizer%   pad_token_idword_embeddingsmax_position_embeddingsposition_embeddings	LayerNormDropoutdropoutregister_bufferr-   arangeexpandselfrD   	__class__s     r1   rN   Embeddings.__init__V   s    !||F,=,=vzzW]WjWjk#%<<0N0NPVPZPZ#[ fjje<zz&..1ELL)G)GHOOPWXej 	 	
    	input_idsinput_embedsreturnc                    Ub  U R                  U5      nUR                  S5      n[        U S5      (       a  U R                  SS2SU24   nON[        R
                  " U[        R                  UR                  S9nUR                  S5      R                  U5      nU R                  U5      nX%-   nU R                  U5      nU R                  U5      nU$ )am  
Parameters:
    input_ids (torch.Tensor):
        torch.tensor(bs, max_seq_length) The token ids to embed.
    input_embeds (*optional*, torch.Tensor):
        The pre-computed word embeddings. Can only be passed if the input ids are `None`.


Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
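
        Example (an illustrative shape sketch; the module is freshly initialized with the default
        `DistilBertConfig`, so the values themselves are random):

        ```python
        >>> import torch
        >>> from transformers import DistilBertConfig
        >>> from transformers.models.distilbert.modeling_distilbert import Embeddings

        >>> emb = Embeddings(DistilBertConfig())
        >>> emb(torch.ones(2, 16, dtype=torch.long)).shape
        torch.Size([2, 16, 768])
        ```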
        """
        if input_ids is not None:
            input_embeds = self.word_embeddings(input_ids)  # (bs, max_seq_length, dim)

        seq_length = input_embeds.size(1)

        # Using the registered buffer (rather than re-creating the ids) keeps the module traceable.
        if hasattr(self, "position_ids"):
            position_ids = self.position_ids[:, :seq_length]
        else:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)  # (max_seq_length)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)  # (bs, max_seq_length)

        position_embeddings = self.position_embeddings(position_ids)  # (bs, max_seq_length, dim)

        embeddings = input_embeds + position_embeddings  # (bs, max_seq_length, dim)
        embeddings = self.LayerNorm(embeddings)  # (bs, max_seq_length, dim)
        embeddings = self.dropout(embeddings)  # (bs, max_seq_length, dim)
        return embeddings


class MultiHeadSelfAttention(nn.Module):
    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.config = config

        self.n_heads = config.n_heads
        self.dim = config.dim
        self.dropout = nn.Dropout(p=config.attention_dropout)
        self.is_causal = False

        if self.dim % self.n_heads != 0:
            raise ValueError(f"self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly")

        self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)

        self.pruned_heads: Set[int] = set()
        self.attention_head_size = self.dim // self.n_heads

    def prune_heads(self, heads: List[int]):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_heads, self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.q_lin = prune_linear_layer(self.q_lin, index)
        self.k_lin = prune_linear_layer(self.k_lin, index)
        self.v_lin = prune_linear_layer(self.v_lin, index)
        self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.dim = self.attention_head_size * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        r"""
        Parameters:
            query: torch.tensor(bs, seq_length, dim)
            key: torch.tensor(bs, seq_length, dim)
            value: torch.tensor(bs, seq_length, dim)
            mask: torch.tensor(bs, seq_length)

        Returns:
            context: torch.tensor(bs, seq_length, dim) Contextualized layer.
            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights, returned after the context
            only if `output_attentions=True`.
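
        Example (an illustrative sketch of this internal module with a default config and random inputs):

        ```python
        >>> import torch
        >>> from transformers import DistilBertConfig
        >>> from transformers.models.distilbert.modeling_distilbert import MultiHeadSelfAttention

        >>> attn = MultiHeadSelfAttention(DistilBertConfig())
        >>> x = torch.randn(2, 16, 768)
        >>> context, weights = attn(x, x, x, mask=torch.ones(2, 16), output_attentions=True)
        >>> context.shape, weights.shape
        (torch.Size([2, 16, 768]), torch.Size([2, 12, 16, 16]))
        ```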
        """
        bs, q_length, dim = query.size()
        k_length = key.size(1)

        dim_per_head = self.dim // self.n_heads

        mask_reshp = (bs, 1, 1, k_length)

        def shape(x: torch.Tensor) -> torch.Tensor:
            """separate heads"""
            return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)

        def unshape(x: torch.Tensor) -> torch.Tensor:
            """group heads"""
            return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)

        q = shape(self.q_lin(query))  # (bs, n_heads, q_length, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_length, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_length, dim_per_head)

        q = q / math.sqrt(dim_per_head)  # (bs, n_heads, q_length, dim_per_head)
        scores = torch.matmul(q, k.transpose(2, 3))  # (bs, n_heads, q_length, k_length)
        mask = (mask == 0).view(mask_reshp).expand_as(scores)  # (bs, n_heads, q_length, k_length)
        scores = scores.masked_fill(mask, torch.tensor(torch.finfo(scores.dtype).min))

        weights = nn.functional.softmax(scores, dim=-1)  # (bs, n_heads, q_length, k_length)
        weights = self.dropout(weights)  # (bs, n_heads, q_length, k_length)

        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask

        context = torch.matmul(weights, v)  # (bs, n_heads, q_length, dim_per_head)
        context = unshape(context)  # (bs, q_length, dim)
        context = self.out_lin(context)  # (bs, q_length, dim)

        if output_attentions:
            return (context, weights)
        else:
            return (context,)


class DistilBertFlashAttention2(MultiHeadSelfAttention):
    """
    DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention` as the weights of the module
    stay untouched. The only required change is on the forward pass, where it needs to correctly call the public
    API of flash attention and deal with padding tokens in case the input contains any of them.
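
    A minimal opt-in sketch (it assumes the `flash_attn` package is installed and a CUDA device is available; the
    checkpoint name is only illustrative):

    ```python
    import torch
    from transformers import DistilBertModel

    model = DistilBertModel.from_pretrained(
        "distilbert-base-uncased", attn_implementation="flash_attention_2", torch_dtype=torch.float16
    )
    ```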
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Older flash attention versions use a top-left aligned causal mask, newer ones a bottom-right aligned one.
        self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        r"""
        Parameters:
            query: torch.tensor(bs, seq_length, dim)
            key: torch.tensor(bs, seq_length, dim)
            value: torch.tensor(bs, seq_length, dim)
            mask: torch.tensor(bs, seq_length)

        Returns:
            context: torch.tensor(bs, seq_length, dim) Contextualized layer.
            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights, returned after the context
            only if `output_attentions=True`.
        """
        batch_size, q_length, dim = query.size()

        dim_per_head = self.dim // self.n_heads

        def reshape(x: torch.Tensor) -> torch.Tensor:
            """separate heads"""
            return x.view(batch_size, -1, self.n_heads, dim_per_head)

        # Flash attention requires the input to have the shape (batch_size, seq_length, n_heads, head_dim)
        query_states = reshape(self.q_lin(query))
        key_states = reshape(self.k_lin(key))
        value_states = reshape(self.v_lin(value))

        attn_dropout = self.config.attention_dropout if self.training else 0.0

        # In PEFT, the layer norms are usually cast in float32 for training stability, and the input hidden states
        # may therefore have been silently cast to float32; cast them back to the expected dtype if so.
        if query_states.dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_lin.weight.dtype

            logger.warning_once(
                "The input hidden states seem to have been silently cast in float32, this might be related to the"
                " fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_weights = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            mask,
            q_length,
            dropout=attn_dropout,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
            is_causal=self.is_causal,
        )

        attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head)
        attn_output = self.out_lin(attn_weights_reshaped)

        if output_attentions:
            return (attn_output, attn_weights)
        else:
            return (attn_output,)


class DistilBertSdpaAttention(MultiHeadSelfAttention):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config=config)
        self.dropout_prob = config.attention_dropout
        self.require_contiguous_qkv = not is_torch_greater_or_equal_than_2_2

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        if output_attentions or head_mask is not None:
            logger.warning_once(
                "DistilBertSdpaAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not"
                " support `output_attentions=True` or `head_mask`. Falling back to the manual attention"
                " implementation, but specifying the manual implementation will be required from Transformers"
                ' version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"`'
                " when loading the model."
            )
            return super().forward(
                query,
                key,
                value,
                mask,
                head_mask,
                output_attentions,
            )

        batch_size, _, _ = query.size()
        dim_per_head = self.dim // self.n_heads

        def shape(x: torch.Tensor) -> torch.Tensor:
            """separate heads"""
            return x.view(batch_size, -1, self.n_heads, dim_per_head).transpose(1, 2)

        def unshape(x: torch.Tensor) -> torch.Tensor:
            """group heads"""
            return x.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * dim_per_head)

        q = shape(self.q_lin(query))  # (bs, n_heads, q_length, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_length, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_length, dim_per_head)

        # SDPA with a memory-efficient backend is bugged with non-contiguous inputs and a custom attn_mask on
        # torch < 2.2, so call `.contiguous()` there. Reference: https://github.com/pytorch/pytorch/issues/112577
        if self.require_contiguous_qkv and q.device.type == "cuda" and mask is not None:
            q = q.contiguous()
            k = k.contiguous()
            v = v.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            q,
            k,
            v,
            attn_mask=mask,
            dropout_p=self.dropout_prob if self.training else 0.0,
            is_causal=False,
        )

        attn_output = unshape(attn_output)
        attn_output = self.out_lin(attn_output)

        return (attn_output,)


class FFN(nn.Module):
    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dropout = nn.Dropout(p=config.dropout)
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
        self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
        self.activation = get_activation(config.activation)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)

    def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
        x = self.lin1(input)
        x = self.activation(x)
        x = self.lin2(x)
        x = self.dropout(x)
        return x


DISTILBERT_ATTENTION_CLASSES = {
    "eager": MultiHeadSelfAttention,
    "flash_attention_2": DistilBertFlashAttention2,
    "sdpa": DistilBertSdpaAttention,
}


class TransformerBlock(nn.Module):
    def __init__(self, config: PretrainedConfig):
        super().__init__()

        # The hidden size must be a multiple of the number of attention heads
        if config.dim % config.n_heads != 0:
            raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly")

        self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config)
        self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)

        self.ffn = FFN(config)
        self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        r"""
        Parameters:
            x: torch.tensor(bs, seq_length, dim)
            attn_mask: torch.tensor(bs, seq_length)

        Returns:
            sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output:
            torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
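
        Example (an illustrative sketch with a default config and random inputs):

        ```python
        >>> import torch
        >>> from transformers import DistilBertConfig
        >>> from transformers.models.distilbert.modeling_distilbert import TransformerBlock

        >>> block = TransformerBlock(DistilBertConfig())
        >>> (hidden_state,) = block(torch.randn(2, 16, 768), attn_mask=torch.ones(2, 16))
        >>> hidden_state.shape
        torch.Size([2, 16, 768])
        ```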
        """
        # Self-Attention
        sa_output = self.attention(
            query=x,
            key=x,
            value=x,
            mask=attn_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
        )
        if output_attentions:
            sa_output, sa_weights = sa_output  # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
        else:  # with output_attentions=False the attention module returns a 1-tuple
            if type(sa_output) is not tuple:
                raise TypeError(f"sa_output must be a tuple but it is {type(sa_output)} type")

            sa_output = sa_output[0]
        sa_output = self.sa_layer_norm(sa_output + x)  # (bs, seq_length, dim)

        # Feed Forward Network
        ffn_output = self.ffn(sa_output)  # (bs, seq_length, dim)
        ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output)  # (bs, seq_length, dim)

        output = (ffn_output,)
        if output_attentions:
            output = (sa_weights,) + output
        return output


class Transformer(nn.Module):
    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.n_layers = config.n_layers
        self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
        r"""
        Parameters:
            x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
            attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.

        Returns:
            hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top)
            layer all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
                Tuple of length n_layers with the hidden states from each layer.
                Optional: only if output_hidden_states=True
            all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
                Tuple of length n_layers with the attention weights from each layer
                Optional: only if output_attentions=True
        """
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_state = x
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_state,
                    attn_mask,
                    head_mask[i],
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_state,
                    attn_mask,
                    head_mask[i],
                    output_attentions,
                )

            hidden_state = layer_outputs[-1]

            if output_attentions:
                if len(layer_outputs) != 2:
                    raise ValueError(f"The length of the layer_outputs should be 2, but it is {len(layer_outputs)}")

                attentions = layer_outputs[0]
                all_attentions = all_attentions + (attentions,)
            else:
                if len(layer_outputs) != 1:
                    raise ValueError(f"The length of the layer_outputs should be 1, but it is {len(layer_outputs)}")

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
        )


class DistilBertPreTrainedModel(PreTrainedModel):
    config_class = DistilBertConfig
    load_tf_weights = None
    base_model_prefix = "distilbert"
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module: nn.Module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, Embeddings) and self.config.sinusoidal_pos_embds:
            create_sinusoidal_embeddings(
                self.config.max_position_embeddings, self.config.dim, module.position_embeddings.weight
            )


@auto_docstring
class DistilBertModel(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.embeddings = Embeddings(config)  # Embeddings
        self.transformer = Transformer(config)  # Encoder
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
        self._use_sdpa = config._attn_implementation == "sdpa"

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.embeddings.position_embeddings

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of position embeddings in the new matrix. If position embeddings are learned, increasing
                the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors
                from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings),
                increasing the size will add correct vectors at the end following the position encoding algorithm,
                whereas reducing the size will remove vectors from the end.
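
        Example (an illustrative sketch):

        ```python
        >>> from transformers import DistilBertModel

        >>> model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        >>> model.resize_position_embeddings(1024)  # grow the learned matrix beyond the default 512 positions
        >>> model.config.max_position_embeddings
        1024
        ```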
        """
        num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings

        # no resizing needs to be done if the length stays the same
        if num_position_embeds_diff == 0:
            return

        logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
        self.config.max_position_embeddings = new_num_position_embeddings

        old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()

        self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)

        if self.config.sinusoidal_pos_embds:
            create_sinusoidal_embeddings(
                n_pos=self.config.max_position_embeddings,
                dim=self.config.dim,
                out=self.embeddings.position_embeddings.weight,
            )
        else:
            with torch.no_grad():
                if num_position_embeds_diff > 0:
                    self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
                        old_position_embeddings_weight
                    )
                else:
                    self.embeddings.position_embeddings.weight = nn.Parameter(
                        old_position_embeddings_weight[:num_position_embeds_diff]
                    )
        # move position_embeddings to correct device
        self.embeddings.position_embeddings.to(self.device)

    def get_input_embeddings(self) -> nn.Embedding:
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings: nn.Embedding):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune: Dict[int, List[List[int]]]):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.transformer.layer[layer].attention.prune_heads(heads)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
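
        Example (a minimal usage sketch):

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, DistilBertModel

        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
        >>> model = DistilBertModel.from_pretrained("distilbert-base-uncased")

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> outputs.last_hidden_state.shape
        torch.Size([1, 8, 768])
        ```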
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        head_mask_is_none = head_mask is None
        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embeddings = self.embeddings(input_ids, inputs_embeds)  # (bs, seq_length, dim)

        if self._use_flash_attention_2:
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            if attention_mask is None:
                attention_mask = torch.ones(input_shape, device=device)  # (bs, seq_length)

            if self._use_sdpa and head_mask_is_none and not output_attentions:
                attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    attention_mask, embeddings.dtype, tgt_len=input_shape[1]
                )

        return self.transformer(
            x=embeddings,
            attn_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


@auto_docstring(
    custom_intro="""
    DistilBert Model with a `masked language modeling` head on top.
    """
)
class DistilBertForMaskedLM(DistilBertPreTrainedModel):
    _tied_weights_keys = ["vocab_projector.weight"]

    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.activation = get_activation(config.activation)

        self.distilbert = DistilBertModel(config)
        self.vocab_transform = nn.Linear(config.dim, config.dim)
        self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
        self.vocab_projector = nn.Linear(config.dim, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

        self.mlm_loss_fct = nn.CrossEntropyLoss()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will
                add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end.
                If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size
                will add correct vectors at the end following the position encoding algorithm, whereas reducing the
                size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        return self.vocab_projector

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.vocab_projector = new_embeddings

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
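
        Example (a minimal usage sketch):

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, DistilBertForMaskedLM

        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
        >>> model = DistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")

        >>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits

        >>> # retrieve index of [MASK]
        >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
        >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
        >>> tokenizer.decode(predicted_token_id)
        'paris'
        ```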
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        dlbrt_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = dlbrt_output[0]  # (bs, seq_length, dim)
        prediction_logits = self.vocab_transform(hidden_states)  # (bs, seq_length, dim)
        prediction_logits = self.activation(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_layer_norm(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_projector(prediction_logits)  # (bs, seq_length, vocab_size)

        mlm_loss = None
        if labels is not None:
            mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))

        if not return_dict:
            output = (prediction_logits,) + dlbrt_output[1:]
            return ((mlm_loss,) + output) if mlm_loss is not None else output

        return MaskedLMOutput(
            loss=mlm_loss,
            logits=prediction_logits,
            hidden_states=dlbrt_output.hidden_states,
            attentions=dlbrt_output.attentions,
        )


@auto_docstring(
    custom_intro="""
    DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """
)
class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.distilbert = DistilBertModel(config)
        self.pre_classifier = nn.Linear(config.dim, config.dim)
        self.classifier = nn.Linear(config.dim, config.num_labels)
        self.dropout = nn.Dropout(config.seq_classif_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will
                add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end.
                If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size
                will add correct vectors at the end following the position encoding algorithm, whereas reducing the
                size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
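
        Example (a minimal usage sketch with a fine-tuned sentiment checkpoint):

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, DistilBertForSequenceClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
        >>> model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits

        >>> model.config.id2label[logits.argmax().item()]
        'POSITIVE'
        ```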
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_state = distilbert_output[0]  # (bs, seq_len, dim)
        pooled_output = hidden_state[:, 0]  # (bs, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs, dim)
        pooled_output = nn.ReLU()(pooled_output)  # (bs, dim)
        pooled_output = self.dropout(pooled_output)  # (bs, dim)
        logits = self.classifier(pooled_output)  # (bs, num_labels)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )


@auto_docstring
class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.distilbert = DistilBertModel(config)
        self.qa_outputs = nn.Linear(config.dim, config.num_labels)
        if config.num_labels != 2:
            raise ValueError(f"config.num_labels should be 2, but it is {config.num_labels}")

        self.dropout = nn.Dropout(config.qa_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will
                add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end.
                If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size
                will add correct vectors at the end following the position encoding algorithm, whereas reducing the
                size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification
            loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = distilbert_output[0]  # (bs, max_query_len, dim)

        hidden_states = self.dropout(hidden_states)  # (bs, max_query_len, dim)
        logits = self.qa_outputs(hidden_states)  # (bs, max_query_len, 2)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()  # (bs, max_query_len)
        end_logits = end_logits.squeeze(-1).contiguous()  # (bs, max_query_len)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + distilbert_output[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )


@auto_docstring
class DistilBertForTokenClassification(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.distilbert = DistilBertModel(config)
        self.dropout = nn.Dropout(config.dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will
                add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end.
                If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size
                will add correct vectors at the end following the position encoding algorithm, whereas reducing the
                size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
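
        Example (an illustrative sketch; the token classification head of this base checkpoint is randomly
        initialized, so the predicted classes are meaningless until the model has been fine-tuned):

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, DistilBertForTokenClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
        >>> model = DistilBertForTokenClassification.from_pretrained("distilbert-base-uncased")

        >>> inputs = tokenizer("HuggingFace is based in NYC", return_tensors="pt")
        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits

        >>> predicted_token_classes = logits.argmax(-1)  # (bs, seq_length)
        ```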
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.distilbert(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring
class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.distilbert = DistilBertModel(config)
        self.pre_classifier = nn.Linear(config.dim, config.dim)
        self.classifier = nn.Linear(config.dim, 1)
        self.dropout = nn.Dropout(config.seq_classif_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will
                add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end.
                If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size
                will add correct vectors at the end following the position encoding algorithm, whereas reducing the
                size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
        >>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> choice0 = "It is eaten with a fork and a knife."
        >>> choice1 = "It is eaten while held in the hand."
        >>> labels = torch.tensor(0).unsqueeze(0)  # choice0 is correct (according to Wikipedia ;)), batch size 1

        >>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
        >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels)  # batch size is 1

        >>> # the linear classifier still needs to be trained
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.distilbert(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_state = outputs[0]  # (bs * num_choices, seq_len, dim)
        pooled_output = hidden_state[:, 0]  # (bs * num_choices, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs * num_choices, dim)
        pooled_output = nn.ReLU()(pooled_output)  # (bs * num_choices, dim)
        pooled_output = self.dropout(pooled_output)  # (bs * num_choices, dim)
        logits = self.classifier(pooled_output)  # (bs * num_choices, 1)

        reshaped_logits = logits.view(-1, num_choices)  # (bs, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "DistilBertForMaskedLM",
    "DistilBertForMultipleChoice",
    "DistilBertForQuestionAnswering",
    "DistilBertForSequenceClassification",
    "DistilBertForTokenClassification",
    "DistilBertModel",
    "DistilBertPreTrainedModel",
]