"""PyTorch UniSpeech model."""

import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...integrations.fsdp import is_fsdp_managed_module
from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available
from ...modeling_outputs import (
    BaseModelOutput,
    CausalLMOutput,
    ModelOutput,
    SequenceClassifierOutput,
    Wav2Vec2BaseModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from .configuration_unispeech import UniSpeechConfig


if is_flash_attn_available():
    from ...modeling_flash_attention_utils import _flash_attention_forward


logger = logging.get_logger(__name__)


@dataclass
class UniSpeechForPreTrainingOutput(ModelOutput):
    """
Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions.

Args:
    loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):
        Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
        paper](https://arxiv.org/pdf/2006.11477.pdf).
    projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
        Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
        projected quantized states.
    projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
        Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
        target vectors for contrastive loss.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, sequence_length, hidden_size)`.

        Hidden-states of the model at the output of each layer plus the initial embedding outputs.
    attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`.

        Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
        heads.
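
Example (an illustrative sketch; the checkpoint name below is an assumption, not something
this file pins down):

```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, UniSpeechForPreTraining

>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-large-1500h-cv")
>>> model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv")

>>> # one second of 16 kHz audio; zeros stand in for a real waveform
>>> inputs = feature_extractor(torch.zeros(16000).numpy(), sampling_rate=16000, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # the two views that the pre-training loss compares
>>> states, quantized = outputs.projected_states, outputs.projected_quantized_states
```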
Nlossprojected_statesprojected_quantized_statescodevector_perplexityhidden_states
attentions )__name__
__module____qualname____firstlineno____doc__r   r   torchFloatTensor__annotations__r   r   r   r    r   r!   __static_attributes__r"       h/var/www/auris/envauris/lib/python3.13/site-packages/transformers/models/unispeech/modeling_unispeech.pyr   r   (   s    4 )-D(5$$
%,48hu0018>B):): ;B9=8E$5$56=8<M8E%"3"345<59Ju00129r,   r   c                   .   ^  \ rS rSrU 4S jrS rSrU =r$ )UniSpeechSamePadLayerL   c                 R   > [         TU ]  5         US-  S:X  a  SU l        g SU l        g )N   r   r   )super__init__num_pad_remove)selfnum_conv_pos_embeddings	__class__s     r-   r4   UniSpeechSamePadLayer.__init__M   s)    #:Q#>!#Car,   c                 X    U R                   S:  a  US S 2S S 2S U R                   * 24   nU$ Nr   r5   r6   r    s     r-   forwardUniSpeechSamePadLayer.forwardQ   s6    ")!Q0F43F3F2F0F*FGMr,   r<   r#   r$   r%   r&   r4   r>   r+   __classcell__r8   s   @r-   r/   r/   L   s    K r,   r/   c                   .   ^  \ rS rSrU 4S jrS rSrU =r$ ) UniSpeechPositionalConvEmbeddingW   c                   > [         TU ]  5         [        R                  " UR                  UR                  UR
                  UR
                  S-  UR                  S9U l        [        R                  R                  n[        [        R                  R                  S5      (       a$  [        R                  R                  R                  n[        5       (       Ga%  SS KnUR                  R                  U R                  R                   SS9   U" U R                  SSS9U l        S S S 5        [        U R                  S5      (       aU  U R                  R                  R                   R"                  nU R                  R                  R                   R$                  nO,U R                  R&                  nU R                  R(                  nUR                  R+                  X5        UR                  R+                  X5        OU" U R                  SSS9U l        [-        UR
                  5      U l        [0        UR2                     U l        g ! , (       d  f       GN,= f)	Nr2   )kernel_sizepaddinggroupsweight_normr   )modifier_rankweight)namedimparametrizations)r3   r4   nnConv1dhidden_sizer7   num_conv_pos_embedding_groupsconvutilsrJ   hasattrrO   r
   	deepspeedzeroGatheredParametersrL   	original0	original1weight_gweight_vregister_external_parameterr/   rH   r	   feat_extract_activation
activation)r6   configrJ   rW   r\   r]   r8   s         r-   r4   )UniSpeechPositionalConvEmbedding.__init__X   s   II6622a777
	 hh**288,,m<<((33??K%''224993C3CST2U'		aH	 Vtyy"4559955<<FF9955<<FF99--99--NN66tFNN66tF#DIIH!DDI,V-K-KL !?!?@ VUs   I
Ic                     UR                  SS5      nU R                  U5      nU R                  U5      nU R                  U5      nUR                  SS5      nU$ Nr   r2   )	transposerT   rH   r`   r=   s     r-   r>   (UniSpeechPositionalConvEmbedding.forwardy   sV    %//15		-0]36%//15r,   )r`   rT   rH   r@   rB   s   @r-   rD   rD   W   s    AB r,   rD   c                   2   ^  \ rS rSrSU 4S jjrS rSrU =r$ )UniSpeechNoLayerNormConvLayer   c                 b  > [         TU ]  5         US:  a  UR                  US-
     OSU l        UR                  U   U l        [
        R                  " U R                  U R                  UR                  U   UR                  U   UR                  S9U l
        [        UR                     U l        g )Nr   r   rG   stridebias)r3   r4   conv_dimin_conv_dimout_conv_dimrP   rQ   conv_kernelconv_stride	conv_biasrT   r	   r_   r`   r6   ra   layer_idr8   s      r-   r4   &UniSpeechNoLayerNormConvLayer.__init__   s    <DqL6??8a<8a"OOH5II**84%%h/!!
	 !!?!?@r,   c                 J    U R                  U5      nU R                  U5      nU$ N)rT   r`   r=   s     r-   r>   %UniSpeechNoLayerNormConvLayer.forward   s$    		-06r,   )r`   rT   ro   rp   r   r@   rB   s   @r-   rh   rh      s    A r,   rh   c                   2   ^  \ rS rSrSU 4S jjrS rSrU =r$ )UniSpeechLayerNormConvLayer   c                   > [         TU ]  5         US:  a  UR                  US-
     OSU l        UR                  U   U l        [
        R                  " U R                  U R                  UR                  U   UR                  U   UR                  S9U l
        [
        R                  " U R                  SS9U l        [        UR                     U l        g )Nr   r   rk   T)elementwise_affine)r3   r4   rn   ro   rp   rP   rQ   rq   rr   rs   rT   	LayerNorm
layer_normr	   r_   r`   rt   s      r-   r4   $UniSpeechLayerNormConvLayer.__init__   s    <DqL6??8a<8a"OOH5II**84%%h/!!
	 ,,t'8'8TR !?!?@r,   c                     U R                  U5      nUR                  SS5      nU R                  U5      nUR                  SS5      nU R                  U5      nU$ )N)rT   re   r   r`   r=   s     r-   r>   #UniSpeechLayerNormConvLayer.forward   sV    		-0%//B76%//B76r,   r`   rT   ro   r   rp   rz   r@   rB   s   @r-   r|   r|      s    A r,   r|   c                   2   ^  \ rS rSrSU 4S jjrS rSrU =r$ )UniSpeechGroupNormConvLayer   c                   > [         TU ]  5         US:  a  UR                  US-
     OSU l        UR                  U   U l        [
        R                  " U R                  U R                  UR                  U   UR                  U   UR                  S9U l
        [        UR                     U l        [
        R                  " U R                  U R                  SS9U l        g )Nr   r   rk   T)
num_groupsnum_channelsaffine)r3   r4   rn   ro   rp   rP   rQ   rq   rr   rs   rT   r	   r_   r`   	GroupNormr   rt   s      r-   r4   $UniSpeechGroupNormConvLayer.__init__   s    <DqL6??8a<8a"OOH5II**84%%h/!!
	 !!?!?@,,$2C2CRVRcRclpqr,   c                 l    U R                  U5      nU R                  U5      nU R                  U5      nU$ rx   )rT   r   r`   r=   s     r-   r>   #UniSpeechGroupNormConvLayer.forward   s2    		-066r,   r   rz   r@   rB   s   @r-   r   r      s    r  r,   r   c                   8   ^  \ rS rSrSrU 4S jrS rS rSrU =r	$ )UniSpeechFeatureEncoder   z.Construct the features from raw audio waveformc           	        > [         TU ]  5         UR                  S:X  a@  [        USS9/[	        UR
                  S-
  5       Vs/ s H  n[        XS-   S9PM     sn-   nOVUR                  S:X  a-  [	        UR
                  5       Vs/ s H  n[        XS9PM     nnO[        SUR                   S35      e[        R                  " U5      U l        SU l        S	U l        g s  snf s  snf )
Ngroupr   )ru   r   layerz`config.feat_extract_norm` is z), but has to be one of ['group', 'layer']FT)r3   r4   feat_extract_normr   rangenum_feat_extract_layersrh   r|   
ValueErrorrP   
ModuleListconv_layersgradient_checkpointing_requires_grad)r6   ra   ir   r8   s       r-   r4    UniSpeechFeatureEncoder.__init__   s    ##w.6vJKv==ABOBA .f1uEBO K %%0INvOmOmInInA+F?In  K 01I1I0JJst  ==5&+#"O
s   C C%c                 N    U R                  5        H
  nSUl        M     SU l        g NF)
parametersrequires_gradr   r6   params     r-   _freeze_parameters*UniSpeechFeatureEncoder._freeze_parameters   s#    __&E"'E '#r,   c                 B   US S 2S 4   nU R                   (       a  U R                  (       a  SUl        U R                   H\  nU R                   (       a@  U R                  (       a/  U R                  (       a  U R                  UR                  U5      nMT  U" U5      nM^     U$ )NT)r   trainingr   r   r   _gradient_checkpointing_func__call__)r6   input_valuesr    
conv_layers       r-   r>   UniSpeechFeatureEncoder.forward   s    $QW- 4==*.M'**J""t'B'Bt}} $ A A''!!
 !+= 9 + r,   )r   r   r   )
r#   r$   r%   r&   r'   r4   r   r>   r+   rA   rB   s   @r-   r   r      s    8#($
 r,   r   c                   .   ^  \ rS rSrU 4S jrS rSrU =r$ )UniSpeechFeatureProjection   c                 4  > [         TU ]  5         [        R                  " UR                  S   UR
                  S9U l        [        R                  " UR                  S   UR                  5      U l	        [        R                  " UR                  5      U l        g )Nr   eps)r3   r4   rP   r   rn   layer_norm_epsr   LinearrR   
projectionDropoutfeat_proj_dropoutdropoutr6   ra   r8   s     r-   r4   #UniSpeechFeatureProjection.__init__   sf    ,,vr':@U@UV))FOOB$79K9KLzz&":":;r,   c                 n    U R                  U5      nU R                  U5      nU R                  U5      nX4$ rx   )r   r   r   )r6   r    norm_hidden_statess      r-   r>   "UniSpeechFeatureProjection.forward  s7    !__];(:;]300r,   )r   r   r   r@   rB   s   @r-   r   r      s    <1 1r,   r   c                     ^  \ rS rSrSr     SS\S\S\S\S\S\S	\\	   4U 4S
 jjjr
S\R                  S\S\4S jr     SS\R                  S\\R                     S\\\R                        S\\R                     S\\R                     S\S\\R                  \\R                     \\\R                        4   4S jjrSrU =r$ )UniSpeechAttentioni
  z=Multi-headed attention from 'Attention Is All You Need' paper	embed_dim	num_headsr   
is_decoderrm   	is_causalra   c                   > [         TU ]  5         Xl        X l        X0l        X-  U l        Xpl        U R
                  U-  U R                  :w  a  [        SU R                   SU S35      eU R
                  S-  U l        X@l	        X`l
        [        R                  " XUS9U l        [        R                  " XUS9U l        [        R                  " XUS9U l        [        R                  " XUS9U l        g )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      )rm   )r3   r4   r   r   r   head_dimra   r   scalingr   r   rP   r   k_projv_projq_projout_proj)	r6   r   r   r   r   rm   r   ra   r8   s	           r-   r4   UniSpeechAttention.__init__  s     	""!.MMI%$..8MdnnM]$YKr3  }}d*$"ii	4@ii	4@ii	4@		)TBr,   tensorseq_lenbszc                     UR                  X2U R                  U R                  5      R                  SS5      R	                  5       $ rd   )viewr   r   re   
contiguousr6   r   r   r   s       r-   _shapeUniSpeechAttention._shape,  s5    {{3GQQRSUVWbbddr,   r    key_value_statespast_key_valueattention_masklayer_head_maskoutput_attentionsreturnc                 	   USLnUR                  5       u  pn
U R                  U5      U R                  -  nU(       a2  Ub/  US   R                  S   UR                  S   :X  a  US   nUS   nGOU(       aE  U R	                  U R                  U5      SU5      nU R	                  U R                  U5      SU5      nOUby  U R	                  U R                  U5      SU5      nU R	                  U R                  U5      SU5      n[        R                  " US   U/SS9n[        R                  " US   U/SS9nODU R	                  U R                  U5      SU5      nU R	                  U R                  U5      SU5      nU R                  (       a  X4nXR                  -  SU R                  4nU R	                  XU5      R                  " U6 nUR                  " U6 nUR                  " U6 nUR                  S5      n[        R                  " XR                  SS5      5      nUR                  5       XR                  -  X4:w  a-  [!        SXR                  -  X4 SUR                  5        35      eUbv  UR                  5       USX4:w  a"  [!        S	USX4 SUR                  5        35      eUR                  XR                  X5      U-   nUR                  XR                  -  X5      n["        R$                  R'                  USS9nUb  UR                  5       U R                  4:w  a*  [!        S
U R                  4 SUR                  5        35      eUR                  SSSS5      UR                  XR                  X5      -  nUR                  XR                  -  X5      nU(       a;  UR                  XR                  X5      nUR                  XR                  -  X5      nOSn["        R$                  R)                  UU R(                  U R*                  S9n[        R                  " UU5      nUR                  5       XR                  -  XR                  4:w  a7  [!        SXR                  -  XR                  4 SUR                  5        35      eUR                  XR                  XR                  5      nUR                  SS5      nUR                  XU R,                  5      nU R/                  U5      nUUU4$ )#Input shape: Batch x Time x ChannelNr   r2   r   r   rN   z$Attention weights should be of size 	, but is z!Attention mask should be of size z/Head mask for a single layer should be of size )pr    `attn_output` should be of size )sizer   r   shaper   r   r   r(   catr   r   r   r   reshapebmmre   r   rP   
functionalsoftmaxr   r   r   r   )r6   r    r   r   r   r   r   is_cross_attentionr   tgt_len_query_states
key_statesvalue_states
proj_shapesrc_lenattn_weightsattn_weights_reshaped
attn_probsattn_outputs                       r-   r>   UniSpeechAttention.forward/  s    .T9',,.a {{=1DLL@ *q!''*.>.D.DQ.GG (*J)!,LT[[1A%BBLJ;;t{{3C'Db#NL'T[[%?SIJ;;t{{='A2sKLN1$5z#BJJ 99nQ&7%FANL T[[%?SIJ;;t{{='A2sKL?? )7NNN*B>
{{<#>CCZP''4
#++Z8//!$yy/C/CAq/IJ3#7"JJ6nn8Lg7_6` a %%'(* 
 %""$a(BB 7a8R7SS\]k]p]p]r\st  (,,S..'SVddL',,S>>-A7TL}},,\r,B&##%$..):: Et~~FWEX Y',,./1  +//2q!<|?P?PQTVdVdfm?wwL',,S>>-A7TL
 %1$5$5c>>7$\!055cNN6JG]L$(!]]**<4<<RVR_R_*`
ii
L9#"6!OO2C..4H'S`S`3a2b c$$&') 
 "&&sNNG]]S!++Aq1 "))#GmmK01>AAr,   )ra   r   r   r   r   r   r   r   r   r   r   r   )        FTFNNNNNF)r#   r$   r%   r&   r'   intfloatboolr   r   r4   r(   Tensorr   r   r>   r+   rA   rB   s   @r-   r   r   
  sZ   G  ,0CC C 	C
 C C C )C C>eU\\ eC ec e 488<1526"'vB||vB #5<<0vB !u||!45	vB
 !.vB "%,,/vB  vB 
u||Xell3XeELL>Q5RR	SvB vBr,   r   c                   f  ^  \ rS rSrSrU 4S jrS\R                  S\S\4S jr	     SS\R                  S	\
\R                     S
\
\\R                        S\
\R                     S\
\R                     S\S\\R                  \
\R                     \
\\R                        4   4S jjrSrU =r$ )UniSpeechFlashAttention2i  a>  
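# A minimal sanity sketch (not part of the public API) of how the convolutional feature
# encoder above downsamples raw audio. The kernel/stride values are assumptions matching
# the usual base configuration, not values read from a config object.
def _illustrative_conv_output_length(num_samples: int = 16000) -> int:
    kernels = (10, 3, 3, 3, 3, 2, 2)
    strides = (5, 2, 2, 2, 2, 2, 2)
    length = num_samples
    for kernel_size, stride in zip(kernels, strides):
        # same formula as `UniSpeechPreTrainedModel._get_feat_extract_output_lengths` below
        length = (length - kernel_size) // stride + 1
    return length  # e.g. 16000 samples (1 s at 16 kHz) -> 49 frames with these defaults
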
class UniSpeechAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[UniSpeechConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj: `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # checks that the cached sequence length matches the provided `key_value_states`
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k, v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
                bsz, self.num_heads, tgt_len, src_len
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # reshape twice so that `attn_weights` keeps its gradient and can be returned
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value


class UniSpeechFlashAttention2(UniSpeechAttention):
    """
    UniSpeech flash attention module. This module inherits from `UniSpeechAttention` as the weights of the module stay
    untouched. The only required change is on the forward pass, where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is
        # bottom-right alignment (the default for flash_attn>=2.1). This attribute handles the difference.
        self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()

    def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # UniSpeechFlashAttention2 attention does not support output_attentions

        is_cross_attention = key_value_states is not None

        bsz, q_len, _ = hidden_states.size()

        # get query proj
        query_states = self._reshape(self.q_proj(hidden_states), -1, bsz)
        # get key, value proj
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k, v, cross_attentions
            key_states = past_key_value[0].transpose(1, 2)
            value_states = past_key_value[1].transpose(1, 2)
        elif is_cross_attention:
            # cross_attentions
            key_states = self._reshape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._reshape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1)
            value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1)
        else:
            # self_attention
            key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2))

        kv_seq_len = key_states.shape[-3]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]

        # In PEFT, layer norms are usually cast to float32 for training stability, which may silently
        # upcast the hidden states; cast them back to the expected compute dtype for flash attention.
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                "The input hidden states seems to be silently casted in float32, this might be related to the fact"
                " you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=self.dropout if self.training else 0.0,
            is_causal=self.is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1)
        attn_output = self.out_proj(attn_output)

        # flash attention never materializes the attention weights
        attn_weights = None

        return attn_output, attn_weights, past_key_value


class UniSpeechSdpaAttention(UniSpeechAttention):
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        if output_attentions or layer_head_mask is not None:
            logger.warning_once(
                "UniSpeechModel is using UniSpeechSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention`"
                " does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual"
                " attention implementation, but specifying the manual implementation will be required from Transformers"
                ' version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"`'
                " when loading the model."
            )
            return super().forward(
                hidden_states,
                key_value_states=key_value_states,
                past_key_value=past_key_value,
                attention_mask=attention_mask,
                layer_head_mask=layer_head_mask,
                output_attentions=output_attentions,
            )

        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states)
        # get key, value proj
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k, v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            past_key_value = (key_states, value_states)

        query_states = self._shape(query_states, tgt_len, bsz)

        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` statement instead
        # of an inline conditional inside the SDPA call to support torch.compile's dynamic shapes.
        is_causal = True if self.is_causal and attention_mask is None and tgt_len > 1 else False

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, None, past_key_value


class UniSpeechFeedForward(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)

        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        hidden_states = self.intermediate_dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.intermediate_dropout(hidden_states)

        hidden_states = self.output_dense(hidden_states)
        hidden_states = self.output_dropout(hidden_states)
        return hidden_states


UNISPEECH_ATTENTION_CLASSES = {
    "eager": UniSpeechAttention,
    "sdpa": UniSpeechSdpaAttention,
    "flash_attention_2": UniSpeechFlashAttention2,
}


class UniSpeechEncoderLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.attention = UNISPEECH_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = UniSpeechFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        attn_residual = hidden_states
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = self.dropout(hidden_states)
        hidden_states = attn_residual + hidden_states

        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states + self.feed_forward(hidden_states)
        hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class UniSpeechEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_attention_mask] = 0
            if self._use_flash_attention_2:
                # 2d mask is passed through the layers
                attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
            else:
                # extend attention_mask
                attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
                attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
                attention_mask = attention_mask.expand(
                    attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
                )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or synced_gpus:
                # under fsdp or deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        layer.__call__,
                        hidden_states,
                        attention_mask,
                        output_attentions,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class UniSpeechAttnAdapterLayer(nn.Module):
    def __init__(self, config):
        """
        Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
        up training throughput.
        """
        super().__init__()
        self.input_dim = config.adapter_attn_dim
        self.hidden_dim = config.hidden_size

        self.norm = nn.LayerNorm(self.hidden_dim)
        self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim)
        self.act_fn = nn.ReLU()
        self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim)

    def forward(self, hidden_states: torch.FloatTensor):
        hidden_states = self.norm(hidden_states)

        hidden_states = self.linear_1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.linear_2(hidden_states)

        return hidden_states


class UniSpeechEncoderLayerStableLayerNorm(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.attention = UNISPEECH_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = UniSpeechFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        if getattr(config, "adapter_attn_dim", None) is not None:
            self.adapter_layer = UniSpeechAttnAdapterLayer(config)
        else:
            self.adapter_layer = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ):
        attn_residual = hidden_states
        hidden_states = self.layer_norm(hidden_states)
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = self.dropout(hidden_states)
        hidden_states = attn_residual + hidden_states
        hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))

        if self.adapter_layer is not None:
            hidden_states = hidden_states + self.adapter_layer(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class UniSpeechEncoderStableLayerNorm(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList(
            [UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens are not attended to
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states = hidden_states * expand_attention_mask.to(dtype=hidden_states.dtype)
            if self._use_flash_attention_2:
                # 2d mask is passed through the layers
                attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
            else:
                # extend attention_mask
                attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
                attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
                attention_mask = attention_mask.expand(
                    attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
                )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.dropout(hidden_states)

        synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or synced_gpus:
                # under fsdp or deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        layer.__call__,
                        hidden_states,
                        attention_mask,
                        output_attentions,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class UniSpeechGumbelVectorQuantizer(nn.Module):
    """
    Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH
    GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.
    """

    def __init__(self, config):
        super().__init__()
        self.num_groups = config.num_codevector_groups
        self.num_vars = config.num_codevectors_per_group

        if config.codevector_dim % self.num_groups != 0:
            raise ValueError(
                f"`config.codevector_dim` {config.codevector_dim} must be divisible by"
                f" `config.num_codevector_groups` {self.num_groups} for concatenation"
            )

        # storage for codebook variables (codewords)
        self.codevectors = nn.Parameter(
            torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
        )
        self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)

        # can be decayed for training
        self.temperature = 2

    @staticmethod
    def _compute_perplexity(probs):
        marginal_probs = probs.mean(dim=0)
        perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
        return perplexity

    def forward(self, hidden_states):
        batch_size, sequence_length, hidden_size = hidden_states.shape

        # project to codevector dim
        hidden_states = self.weight_proj(hidden_states)
        hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)

        if self.training:
            # sample code vector probs via gumbel in a differentiable way
            codevector_probs = nn.functional.gumbel_softmax(
                hidden_states.float(), tau=self.temperature, hard=True
            ).type_as(hidden_states)

            # compute perplexity
            codevector_soft_dist = torch.softmax(
                hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
            )
            perplexity = self._compute_perplexity(codevector_soft_dist)
        else:
            # take argmax in non-differentiable way and compute a hard (one-hot) codevector distribution
            codevector_idx = hidden_states.argmax(dim=-1)
            codevector_probs = hidden_states.new_zeros(hidden_states.shape).scatter_(
                -1, codevector_idx.view(-1, 1), 1.0
            )
            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)

            perplexity = self._compute_perplexity(codevector_probs)

        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
        # use probs to retrieve codevectors
        codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
        codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
        codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)

        return codevectors, perplexity


class UniSpeechPreTrainedModel(PreTrainedModel):
    config_class = UniSpeechConfig
    base_model_prefix = "unispeech"
    main_input_name = "input_values"
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        # gumbel softmax requires special init
        if isinstance(module, UniSpeechGumbelVectorQuantizer):
            module.weight_proj.weight.data.normal_(mean=0.0, std=1)
            module.weight_proj.bias.data.zero_()
            nn.init.uniform_(module.codevectors)
        elif isinstance(module, UniSpeechPositionalConvEmbedding):
            nn.init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, UniSpeechFeatureProjection):
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula, taken from
            # https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1

        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        # Effectively attention_mask.sum(-1), but not in-place so it can run in inference mode
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
        batch_size = attention_mask.shape[0]

        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations make sure that all values before the output length indices are attended to
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.

Args:
    shape: The shape for which to compute masks. This should be of a tuple of size 2 where
           the first element is the batch size and the second element is the length of the axis to span.
    mask_prob:  The percentage of the whole axis (between 0 and 1) which will be masked. The number of
                independently generated mask spans of length `mask_length` is computed by
                `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
                actual percentage will be smaller.
    mask_length: size of the mask
    min_masks: minimum number of masked spans
    attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
                    each batch dimension.
r   z&`mask_length` has to be bigger than 0.zO`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: z and `sequence_length`: `c                    > [        TU -  T-  T-   5      n[        UT5      nUT-  T:  a  TT-  nU TS-
  -
  U:  a  [        U TS-
  -
  S5      nU$ )z;Given input length, compute how many spans should be maskedr   r   )r   max)r  num_masked_spanepsilonr  r  r  r  s     r-   compute_num_masked_span6_compute_mask_indices.<locals>.compute_num_masked_spanv  so    i,6DwNOoy9 [(?:-<O ;?+o=!,+/"BAFOr,   Nr   rW  r   F)replace)r   nprandomrh  itemdetachr  tolistr   r  r   choicer  lenconcatenateonesint32appendarraybroadcast_tor   r  put_along_axis)r   r  r  r   r  r  r  r   r  spec_aug_maskspec_aug_mask_idxsmax_num_masked_spanr  r  spec_aug_mask_idxdummy_mask_idxoffsetsr  r  s    `` `            @@r-   _compute_mask_indicesr  P  s   0 #(JQABB_$]^i]j&&7q:
 	
 iinnQ$$&G $ % 	##B'..0',Z'89'8!o'89  HHj/:$GM1/Ba%1,? II,,IIlkAo67RW - 
  !Q& -q0N.q1NNN(;(MUWU]U] ^ao op
 	!!"34/ &2 "45 1a:&+(V ,33JVa@ab ii$T4]3Goog
'UV^^+5G ,g5 /A"55GVYZGZ!0CCD mB?w :s   (I0c                   >  ^  \ rS rSrS\4U 4S jjr  SS\R                  S\\R                     S\\R                     4S jjr
\     SS\\R                     S\\R                     S\\R                     S	\\   S
\\   S\\   S\\\4   4S jj5       rSrU =r$ )UniSpeechModeli  ra   c                   > [         TU ]  U5        Xl        [        U5      U l        [        U5      U l        UR                  S:  d  UR                  S:  aG  [        R                  " [        R                  " UR                  5      R                  5       5      U l        UR                   (       a  [#        U5      U l        O['        U5      U l        U R)                  5         g )Nr   )r3   r4   ra   r   feature_extractorr   feature_projectionmask_time_probmask_feature_probrP   r  r(   r   rR   r  masked_spec_embeddo_stable_layer_normr  encoderrI  	post_initr   s     r-   r4   UniSpeechModel.__init__  s     !8!@"<V"D  3&&*B*BS*H%'\\%,,v?Q?Q2R2[2[2]%^D"&&:6BDL+F3DL 	r,   r    mask_time_indicesr   c                    [        U R                  SS5      (       d  U$ UR                  5       u  pEnUb(  U R                  R	                  UR
                  5      X'   OU R                  R                  S:  a  U R                  (       a  [        XE4U R                  R                  U R                  R                  UU R                  R                  S9n[        R                  " X!R                  [        R                  S9nU R                  R	                  UR
                  5      X'   U R                  R                  S:  a  U R                  (       a  [        XF4U R                  R                  U R                  R                   U R                  R"                  S9n[        R                  " XqR                  [        R                  S9nUSS2S4   R%                  SUS5      nSX'   U$ )	z
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://arxiv.org/abs/1904.08779).
apply_spec_augmentTNr   )r  r  r   r  )r  r  )r  r  r  r   )r  ra   r   r  r  r  r  r   r  mask_time_lengthmask_time_min_masksr(   r   r  r   r  mask_feature_lengthmask_feature_min_masksrg  )r6   r    r  r   r  r  rR   mask_feature_indicess           r-   _mask_hidden_states"UniSpeechModel._mask_hidden_states  s    t{{$8$??   4A3E3E3G0
[(/3/E/E/H/HI\I\/]M,[[''!+ 5-++44 KK88-++99! !&->G[G[chcmcm n/3/E/E/H/HI\I\/]M,;;((1,#8)++77 KK;;++<<	$  $)<<0DMaMainisis#t #74#@#G#GO]_#` 23M/r,   r   r   rS  rT  r   c                    Ub  UOU R                   R                  nUb  UOU R                   R                  nUb  UOU R                   R                  nU R	                  U5      nUR                  SS5      nUb  U R                  UR                  S   U5      nU R                  U5      u  pU R                  XUS9nU R                  UUUUUS9n	U	S   nU(       d	  X4U	SS -   $ [        UUU	R                  U	R                  S9$ )a  
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
    Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
    masked extracted features in *config.proj_codevector_dim* space.
Nr   r2   )r  r   r   r   rS  rT  r   )rb  extract_featuresr    r!   )ra   r   rS  use_return_dictr  re   r  r   r  r'  r  UniSpeechBaseModelOutputr    r!   )
r6   r   r   r  r   rS  rT  r+  r    encoder_outputss
             r-   r>   UniSpeechModel.forward  s7    2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B]11,?+55a;%!DDEUE[E[\]E^`noN*.*A*ABR*S'00~ 1 
 ,,)/!5# ' 
 (*!4qr7JJJ'+-)77&11	
 	
r,   )ra   r  r  r  r  rX  NNNNN)r#   r$   r%   r&   r   r4   r(   r)   r   r  r'  r   r   r   r   r   r-  r>   r+   rA   rB   s   @r-   r  r    s     ( :>59	,((, $E$5$56, !!1!12	,\  269=,0/3&*2
u||,2
 !.2
 $E$5$56	2

 $D>2
 'tn2
 d^2
 
u..	/2
 2
r,   r  zZ
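
# Usage sketch (illustrative, not part of the module's public code): extracting
# frame-level hidden states from raw 16 kHz speech with the bare encoder. The
# checkpoint name and the random waveform are assumptions made for the example,
# and the snippet is kept in comments so the module has no import-time side effects.
#
#     import torch
#     from transformers import AutoFeatureExtractor, UniSpeechModel
#
#     feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-large-1500h-cv")
#     model = UniSpeechModel.from_pretrained("microsoft/unispeech-large-1500h-cv")
#
#     waveform = torch.randn(16000).numpy()  # one second of dummy audio at 16 kHz
#     inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     print(outputs.last_hidden_state.shape)  # (batch, frames, hidden_size), ~49 frames per second
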

@auto_docstring(
    custom_intro="""
    UniSpeech Model with a vector-quantization module and ctc loss for pre-training.
    """
)
class UniSpeechForPreTraining(UniSpeechPreTrainedModel):
    def __init__(self, config: UniSpeechConfig):
        super().__init__(config)
        self.unispeech = UniSpeechModel(config)
        self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)

        self.quantizer = UniSpeechGumbelVectorQuantizer(config)
        self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
        self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size)

        self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes)
        self.dropout = nn.Dropout(config.final_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def set_gumbel_temperature(self, temperature: int):
        """
        Set the Gumbel softmax temperature to a given value. Only necessary for training.
        """
        self.quantizer.temperature = temperature

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()

    @staticmethod
    def compute_contrastive_logits(
        target_features: torch.FloatTensor,
        negative_features: torch.FloatTensor,
        predicted_features: torch.FloatTensor,
        temperature: int = 1,
    ):
        """
        Compute logits for contrastive loss using cosine similarity as the distance measure between
        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
        """
        target_features = torch.cat([target_features, negative_features], dim=0)
        logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)
        logits = logits.type_as(target_features)

        # apply temperature
        logits = logits / temperature
        return logits

    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, UniSpeechForPreTrainingOutput]:
        r"""
        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoFeatureExtractor, UniSpeechForPreTraining

        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-large-1500h-cv")
        >>> model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv")
        >>> # TODO: Add full pretraining example
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        transformer_features = outputs[0]

        # quantize all (unmasked) extracted features and project to final vq dim
        extract_features = self.dropout_features(outputs[1])
        quantized_features, codevector_perplexity = self.quantizer(extract_features)

        # project quantized features twice
        quantized_features = self.project_q(quantized_features.to(self.project_q.weight.dtype))
        quantized_features = self.project_hid(quantized_features)

        # randomly replace transformer features by the quantized ones with probability `config.replace_prob`
        prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_(
            self.config.replace_prob
        )
        prob_replace_matrix = prob_replace_matrix.transpose(0, 1)
        sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device)
        sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1)
        sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1)
        logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + (
            quantized_features.masked_fill(~sampled_replace_matrix, 0.0)
        )

        # project to ctc units
        logits = self.dropout(logits)
        logits = self.ctc_proj(logits)

        loss = None
        if not return_dict:
            if loss is not None:
                return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
            return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]

        return UniSpeechForPreTrainingOutput(
            loss=loss,
            projected_states=transformer_features,
            projected_quantized_states=quantized_features,
            codevector_perplexity=codevector_perplexity,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

_HIDDEN_STATES_START_POSITION = 2


@auto_docstring(
    custom_intro="""
    UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
    """
)
class UniSpeechForCTC(UniSpeechPreTrainedModel):
    def __init__(self, config, target_lang: Optional[str] = None):
        r"""
        target_lang (`str`, *optional*):
            Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or
            adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechForCTC`] with adapters. Uses 'eng'
            by default.
        """
        super().__init__(config)

        self.unispeech = UniSpeechModel(config)
        self.dropout = nn.Dropout(config.final_dropout)

        self.target_lang = target_lang

        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that does not define the"
                " vocabulary size of the language model head. Please instantiate the model as follows:"
                " `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`, or define `vocab_size` of your"
                " model's configuration."
            )
        output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def tie_weights(self):
        """
        This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
        passing `target_lang=...` to `from_pretrained(...)`.

        This method is **not** supposed to be called by the user and is prone to be changed in the future.
        """
        # `tie_weights` is usually used to tie input and output embedding weights. It is re-purposed here to load
        # adapter layers for UniSpeech without introducing a new API to [`PreTrainedModel`]. While slightly hacky,
        # UniSpeech never has to tie input and output embeddings, so it is ok to repurpose this function.
        target_lang = self.target_lang

        if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
            raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
        elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
            logger.info("By default `target_lang` is set to 'eng'.")
        elif target_lang is not None:
            self.load_adapter(target_lang, force_load=True)

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will
        not be updated during training. Only the classification head will be updated.
        """
        for param in self.unispeech.parameters():
            param.requires_grad = False

    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None and labels.max() >= self.config.vocab_size:
            raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")

        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)

        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)

            # assuming that padded tokens are filled with -100 when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)

            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)

            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )

@auto_docstring(
    custom_intro="""
    UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
    SUPERB Keyword Spotting.
    """
)
class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)"
            )
        self.unispeech = UniSpeechModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will
        not be updated during training. Only the classification head will be updated.
        """
        for param in self.unispeech.parameters():
            param.requires_grad = False

    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio
            file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip
            install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for
            padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechProcessor.__call__`] for
            details.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            pooled_output = hidden_states.mean(dim=1)
        else:
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "UniSpeechForCTC",
    "UniSpeechForPreTraining",
    "UniSpeechForSequenceClassification",
    "UniSpeechModel",
    "UniSpeechPreTrainedModel",
]