
from typing import Callable, Optional, Tuple, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...configuration_utils import PretrainedConfig
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPast,
    BaseModelOutputWithPastAndCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
)
from ...modeling_rope_utils import rope_config_validation
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, logging
from ..glm.modeling_glm import GlmAttention, GlmRotaryEmbedding, apply_rotary_pos_emb
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaModel, eager_attention_forward
from ..whisper.modeling_whisper import WhisperModel, shift_tokens_right


logger = logging.get_logger(__name__)


class MoonshineConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`MoonshineModel`]. It is used to instantiate a Moonshine
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Moonshine
[UsefulSensors/moonshine-tiny](https://huggingface.co/UsefulSensors/moonshine-tiny).

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    vocab_size (`int`, *optional*, defaults to 32768):
        Vocabulary size of the Moonshine model. Defines the number of different tokens that can be represented by the
        `inputs_ids` passed when calling [`MoonshineModel`].
    hidden_size (`int`, *optional*, defaults to 288):
        Dimension of the hidden representations.
    intermediate_size (`int`, *optional*, defaults to 1152):
        Dimension of the MLP representations.
    encoder_num_hidden_layers (`int`, *optional*, defaults to 6):
        Number of hidden layers in the Transformer encoder.
    decoder_num_hidden_layers (`int`, *optional*, defaults to 6):
        Number of hidden layers in the Transformer decoder.
    encoder_num_attention_heads (`int`, *optional*, defaults to 8):
        Number of attention heads for each attention layer in the Transformer encoder.
    decoder_num_attention_heads (`int`, *optional*, defaults to 8):
        Number of attention heads for each attention layer in the Transformer decoder.
    encoder_num_key_value_heads (`int`, *optional*):
        This is the number of key_value heads that should be used to implement Grouped Query Attention. If
        `encoder_num_key_value_heads=encoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
        `encoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
        converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
        by meanpooling all the original heads within that group. For more details, check out [this
        paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
        `num_attention_heads`.
    decoder_num_key_value_heads (`int`, *optional*):
        This is the number of key_value heads that should be used to implement Grouped Query Attention. If
        `decoder_num_key_value_heads=decoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
        `decoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
        converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
        by meanpooling all the original heads within that group. For more details, check out [this
        paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
        `decoder_num_attention_heads`.
    pad_head_dim_to_multiple_of (`int`, *optional*):
        Pad head dimension in encoder and decoder to the next multiple of this value. Necessary for using certain
        optimized attention implementations.
    encoder_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder.
    decoder_hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
        The non-linear activation function (function or string) in the decoder.
    max_position_embeddings (`int`, *optional*, defaults to 512):
        The maximum sequence length that this model might ever be used with.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    decoder_start_token_id (`int`, *optional*, defaults to 1):
        Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
        are provided to the `generate` function. It is used to guide the model's generation process depending on
        the task.
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/values attentions (not used by all models).
    rope_theta (`float`, *optional*, defaults to 10000.0):
        The base period of the RoPE embeddings.
    rope_scaling (`Dict`, *optional*):
        Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
        and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
        accordingly.
        Expected contents:
            `rope_type` (`str`):
                The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                'llama3'], with 'default' being the original RoPE implementation.
            `factor` (`float`, *optional*):
                Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                original maximum pre-trained length.
            `original_max_position_embeddings` (`int`, *optional*):
                Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                pretraining.
            `attention_factor` (`float`, *optional*):
                Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                computation. If unspecified, it defaults to value recommended by the implementation, using the
                `factor` field to infer the suggested value.
            `beta_fast` (`float`, *optional*):
                Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                ramp function. If unspecified, it defaults to 32.
            `beta_slow` (`float`, *optional*):
                Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                ramp function. If unspecified, it defaults to 1.
            `short_factor` (`List[float]`, *optional*):
                Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                size divided by the number of attention heads divided by 2
            `long_factor` (`List[float]`, *optional*):
                Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                size divided by the number of attention heads divided by 2
            `low_freq_factor` (`float`, *optional*):
                Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
            `high_freq_factor` (`float`, *optional*):
                Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
    partial_rotary_factor (`float`, *optional*, defaults to 0.9):
        Percentage of the query and keys which will have rotary embedding.
    is_encoder_decoder (`bool`, *optional*, defaults to `True`):
        Whether the model is used as an encoder/decoder or not.
    attention_bias (`bool`, *optional*, defaults to `False`):
        Whether to use a bias in the query, key, value and output projection layers during self-attention.
    attention_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the attention probabilities.
    bos_token_id (`int`, *optional*, defaults to 1):
        Denotes beginning of sequences token id.
    eos_token_id (`int`, *optional*, defaults to 2):
        Denotes end of sequences token id.

Example:

```python
>>> from transformers import MoonshineModel, MoonshineConfig

>>> # Initializing a Moonshine style configuration
>>> configuration = MoonshineConfig().from_pretrained("UsefulSensors/moonshine-tiny")

>>> # Initializing a model from the configuration
>>> model = MoonshineModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
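
>>> # A configuration can also be built from scratch. The values below are illustrative only
>>> # (they do not correspond to a released checkpoint); grouped-query attention is enabled in the
>>> # encoder by giving it fewer key/value heads than attention heads.
>>> custom_configuration = MoonshineConfig(hidden_size=256, encoder_num_attention_heads=8, encoder_num_key_value_heads=4)
>>> custom_model = MoonshineModel(custom_configuration)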
```"""

    model_type = "moonshine"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_key_value_heads": "encoder_num_key_value_heads",
        "num_attention_heads": "encoder_num_attention_heads",
        "num_hidden_layers": "encoder_num_hidden_layers",
    }

    def __init__(
        self,
        vocab_size=32768,
        hidden_size=288,
        intermediate_size=1152,
        encoder_num_hidden_layers=6,
        decoder_num_hidden_layers=6,
        encoder_num_attention_heads=8,
        decoder_num_attention_heads=8,
        encoder_num_key_value_heads=None,
        decoder_num_key_value_heads=None,
        pad_head_dim_to_multiple_of=None,
        encoder_hidden_act="gelu",
        decoder_hidden_act="silu",
        max_position_embeddings=512,
        initializer_range=0.02,
        decoder_start_token_id=1,
        use_cache=True,
        rope_theta=10000.0,
        rope_scaling=None,
        partial_rotary_factor=0.9,
        is_encoder_decoder=True,
        attention_bias=False,
        attention_dropout=0.0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.encoder_num_hidden_layers = encoder_num_hidden_layers
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.encoder_num_attention_heads = encoder_num_attention_heads
        self.decoder_num_attention_heads = decoder_num_attention_heads

        if encoder_num_key_value_heads is None:
            encoder_num_key_value_heads = encoder_num_attention_heads
        self.encoder_num_key_value_heads = encoder_num_key_value_heads

        if decoder_num_key_value_heads is None:
            decoder_num_key_value_heads = decoder_num_attention_heads
        self.decoder_num_key_value_heads = decoder_num_key_value_heads

        self.pad_head_dim_to_multiple_of = pad_head_dim_to_multiple_of
        self.encoder_hidden_act = encoder_hidden_act
        self.decoder_hidden_act = decoder_hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.partial_rotary_factor = partial_rotary_factor
        self.is_encoder_decoder = is_encoder_decoder
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        # Validate the correctness of the rotary position embedding parameters
        rope_config_validation(self)

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )


class MoonshineEncoderMLP(nn.Module):
    def __init__(self, config, hidden_act):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class MoonshineDecoderMLP(nn.Module):
    def __init__(self, config, hidden_act):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size * 2)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        # Gated MLP: the projection is split in two, one half gates the other.
        hidden_states, gate = hidden_states.chunk(2, dim=-1)
        hidden_states = self.activation_fn(gate) * hidden_states
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class MoonshineAttention(GlmAttention):
    def __init__(
        self,
        config: MoonshineConfig,
        layer_idx: int,
        is_causal: bool,
        num_attention_heads: int,
        num_key_value_heads: int,
    ):
        config.update({"num_attention_heads": num_attention_heads, "num_key_value_heads": num_key_value_heads})
        super().__init__(config, layer_idx)
        self.is_causal = is_causal
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)

        # Pad the head dimension to the next multiple requested by the config, when needed by
        # certain optimized attention implementations.
        if self.config.pad_head_dim_to_multiple_of is not None:
            target_multiple = self.config.pad_head_dim_to_multiple_of
            target_head_dim = target_multiple * ((self.head_dim + target_multiple - 1) // target_multiple)
            self.head_dim_padding = target_head_dim - self.head_dim
        else:
            self.head_dim_padding = 0

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        key_value_states: Optional[torch.Tensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len = hidden_states.shape[:-1]

        query_states = (
            self.q_proj(hidden_states)
            .view(bsz, q_len, self.config.num_attention_heads, self.head_dim)
            .transpose(1, 2)
        )

        is_cross_attention = key_value_states is not None
        if past_key_value is not None:
            is_updated = past_key_value.is_updated.get(self.layer_idx)
            if is_cross_attention:
                # After the first generated id, we can subsequently re-use all key/value states from the cache
                past_key_value.is_updated[self.layer_idx] = True
                past_key_value = past_key_value.cross_attention_cache
            else:
                past_key_value = past_key_value.self_attention_cache

        # use key_value_states if cross attention
        current_states = key_value_states if key_value_states is not None else hidden_states
        if is_cross_attention and past_key_value and is_updated:
            # reuse k, v, cross attentions
            key_states = past_key_value.key_cache[self.layer_idx]
            value_states = past_key_value.value_cache[self.layer_idx]
        else:
            key_states = (
                self.k_proj(current_states)
                .view(bsz, -1, self.config.num_key_value_heads, self.head_dim)
                .transpose(1, 2)
            )
            value_states = (
                self.v_proj(current_states)
                .view(bsz, -1, self.config.num_key_value_heads, self.head_dim)
                .transpose(1, 2)
            )
            if is_cross_attention and past_key_value is not None:
                key_states, value_states = past_key_value.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )

        if not is_cross_attention:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
            if past_key_value is not None:
                # sin and cos are specific to RoPE models; cache_position needed for the static cache
                cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
                key_states, value_states = past_key_value.update(
                    key_states, value_states, self.layer_idx, cache_kwargs
                )

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        is_causal = True if self.is_causal and attention_mask is None and q_len > 1 else False

        if self.head_dim_padding > 0:
            query_states = torch.nn.functional.pad(query_states, (0, self.head_dim_padding))
            key_states = torch.nn.functional.pad(key_states, (0, self.head_dim_padding))
            value_states = torch.nn.functional.pad(value_states, (0, self.head_dim_padding))

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            is_causal=is_causal,
            **kwargs,
        )

        if self.head_dim_padding > 0:
            attn_output = attn_output[..., : -self.head_dim_padding]

        attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class MoonshineRotaryEmbedding(GlmRotaryEmbedding):
    pass


class MoonshineEncoderLayer(LlamaDecoderLayer):
    def __init__(self, config: MoonshineConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.self_attn = MoonshineAttention(
            config=config,
            layer_idx=layer_idx,
            is_causal=False,
            num_attention_heads=config.encoder_num_attention_heads,
            num_key_value_heads=config.encoder_num_key_value_heads,
        )
        self.mlp = MoonshineEncoderMLP(config, config.encoder_hidden_act)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False)


class MoonshineDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: MoonshineConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = MoonshineAttention(
            config=config,
            layer_idx=layer_idx,
            is_causal=True,
            num_attention_heads=config.decoder_num_attention_heads,
            num_key_value_heads=config.decoder_num_key_value_heads,
        )
        self.encoder_attn = MoonshineAttention(
            config=config,
            layer_idx=layer_idx,
            is_causal=False,
            num_attention_heads=config.decoder_num_attention_heads,
            num_key_value_heads=config.decoder_num_key_value_heads,
        )

        self.mlp = MoonshineDecoderMLP(config, config.decoder_hidden_act)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
        self.final_layernorm = nn.LayerNorm(config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        encoder_position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        encoder_position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Cross Attention
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.post_attention_layernorm(hidden_states)
            hidden_states, cross_attn_weights = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )
            hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        return outputs


@auto_docstring
class MoonshinePreTrainedModel(PreTrainedModel):
    config_class = MoonshineConfig
    base_model_prefix = "model"
    main_input_name = "input_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MoonshineEncoderLayer", "MoonshineDecoderLayer"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True
    _supports_static_cache = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.weight.data.fill_(1.0)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers
        """
        output_conv1_length = int((input_lengths - 127) / 64 + 1)
        output_conv2_length = int((output_conv1_length - 7) / 3 + 1)
        output_conv3_length = int((output_conv2_length - 3) / 2 + 1)
        return output_conv3_length


class MoonshineEncoder(MoonshinePreTrainedModel):
    """
    Transformer encoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MoonshineEncoderLayer`]

    Args:
        config: MoonshineConfig
    """

    main_input_name = "input_values"

    def __init__(self, config: MoonshineConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        self.conv1 = nn.Conv1d(1, embed_dim, kernel_size=127, stride=64, bias=False)
        self.conv2 = nn.Conv1d(embed_dim, 2 * embed_dim, kernel_size=7, stride=3)
        self.conv3 = nn.Conv1d(2 * embed_dim, embed_dim, kernel_size=3, stride=2)
        self.groupnorm = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=1e-5)
        self.rotary_emb = MoonshineRotaryEmbedding(config=config)
        self.layers = nn.ModuleList(
            [MoonshineEncoderLayer(config, idx) for idx in range(config.encoder_num_hidden_layers)]
        )
        self.layer_norm = nn.LayerNorm(embed_dim, bias=False)

        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.conv1

    def set_input_embeddings(self, value: nn.Module):
        self.conv1 = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutput:
        r"""
        Args:
            input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
                Float values of the raw speech waveform. Raw speech waveform can be
                obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
                `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
                `input_values`, the [`AutoFeatureExtractor`] should be used for padding
                and conversion into a tensor of type `torch.FloatTensor`.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding indices in `input_values`. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
                tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
                more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if input_values is None:
            raise ValueError("You must specify input_values.")

        # conv downsampling
        input_values = input_values.unsqueeze(1)
        hidden_states = nn.functional.tanh(self.conv1(input_values))
        hidden_states = self.groupnorm(hidden_states)
        hidden_states = nn.functional.gelu(self.conv2(hidden_states))
        hidden_states = nn.functional.gelu(self.conv3(hidden_states))
        hidden_states = hidden_states.permute(0, 2, 1)

        # downsample the attention mask to match the sequence length of the conv features
        if attention_mask is not None:
            mask_len = self._get_feat_extract_output_lengths(attention_mask.shape[-1])
            downsample_stride = 64 * 3 * 2  # conv strides
            attention_mask = attention_mask[..., ::downsample_stride][..., :mask_len]
            if self.config._attn_implementation == "flash_attention_2":
                attention_mask = attention_mask if (attention_mask == 0.0).any() else None
            elif self.config._attn_implementation == "sdpa" and not output_attentions:
                attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, hidden_states.dtype)
            else:
                attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        position_ids = torch.arange(0, hidden_states.shape[1], device=hidden_states.device).unsqueeze(0)

        # create position embeddings to be shared across the encoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for encoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                output_attentions=output_attentions,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class MoonshineDecoder(LlamaModel):
    main_input_name = "input_ids"

    def __init__(self, config: MoonshineConfig):
        super().__init__(config)
        self.norm = nn.LayerNorm(config.hidden_size, bias=False)
        self.layers = nn.ModuleList(
            [MoonshineDecoderLayer(config, layer_idx) for layer_idx in range(config.decoder_num_hidden_layers)]
        )

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
            of the decoder.
        encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding indices in `encoder_hidden_states`. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            self_attention_cache = DynamicCache()
            cross_attention_cache = DynamicCache()
            past_key_values = EncoderDecoderCache(self_attention_cache, cross_attention_cache)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None

        # downsample the encoder attention mask to match the encoder hidden states length
        if encoder_attention_mask is not None:
            mask_len = encoder_hidden_states.shape[-2]
            downsample_stride = 64 * 3 * 2  # conv strides
            encoder_attention_mask = encoder_attention_mask[..., ::downsample_stride][..., :mask_len]
            if self.config._attn_implementation == "flash_attention_2":
                encoder_attention_mask = encoder_attention_mask if (encoder_attention_mask == 0.0).any() else None
            elif self.config._attn_implementation == "sdpa" and not output_attentions:
                encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    encoder_attention_mask, inputs_embeds.dtype, inputs_embeds.shape[-2]
                )
            else:
                encoder_attention_mask = _prepare_4d_attention_mask(
                    encoder_attention_mask, inputs_embeds.dtype, inputs_embeds.shape[-2]
                )

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )


class MoonshineModel(WhisperModel):
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Union[EncoderDecoderCache, Tuple[torch.FloatTensor]]] = None,
        decoder_inputs_embeds: Optional[Tuple[torch.FloatTensor]] = None,
        decoder_position_ids: Optional[Tuple[torch.LongTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Seq2SeqModelOutput:
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
            Float values of the raw speech waveform. Raw speech waveform can be
            obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
            `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
            `input_values`, the [`AutoFeatureExtractor`] should be used for padding
            and conversion into a tensor of type `torch.FloatTensor`.
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        decoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoFeatureExtractor, MoonshineModel
        >>> from datasets import load_dataset

        >>> model = MoonshineModel.from_pretrained("UsefulSensors/moonshine-tiny")
        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("UsefulSensors/moonshine-tiny")
        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
        >>> input_values = inputs.input_values
        >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
        >>> last_hidden_state = model(input_values, decoder_input_ids=decoder_input_ids).last_hidden_state
        >>> list(last_hidden_state.shape)
        [1, 2, 288]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_values,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
            )
        # wrap a user-provided tuple in a BaseModelOutput
        elif not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs.last_hidden_state,
            encoder_attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            position_ids=decoder_position_ids,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Moonshine Model with a language modeling head. Can be used for automatic speech recognition.
    """
)
class MoonshineForConditionalGeneration(MoonshinePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["proj_out.weight"]

    def __init__(self, config: MoonshineConfig):
        super().__init__(config)
        self.model = MoonshineModel(config)
        self.proj_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    def get_output_embeddings(self):
        return self.proj_out

    def set_output_embeddings(self, new_embeddings):
        self.proj_out = new_embeddings

    def get_input_embeddings(self) -> nn.Module:
        return self.model.get_input_embeddings()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Union[EncoderDecoderCache, Tuple[torch.FloatTensor]]] = None,
        decoder_inputs_embeds: Optional[Tuple[torch.FloatTensor]] = None,
        decoder_position_ids: Optional[Tuple[torch.LongTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
    ) -> Seq2SeqLMOutput:
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
            Float values of the raw speech waveform. Raw speech waveform can be
            obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
            `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
            `input_values`, the [`AutoFeatureExtractor`] should be used for padding
            and conversion into a tensor of type `torch.FloatTensor`.
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        decoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
            or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
            only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, MoonshineForConditionalGeneration
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")
        >>> model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")

        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

        >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
        >>> input_values = inputs.input_values

        >>> generated_ids = model.generate(input_values, max_new_tokens=100)

        >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> transcription
        'Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
        ```"""
        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(
                labels, self.config.pad_token_id, self.config.decoder_start_token_id
            )

        outputs: Seq2SeqModelOutput = self.model(
            input_values,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_inputs_embeds=decoder_inputs_embeds,
            decoder_position_ids=decoder_position_ids,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )
        logits = self.proj_out(outputs.last_hidden_state)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size)

        return Seq2SeqLMOutput(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )


__all__ = [
    "MoonshineConfig",
    "MoonshineModel",
    "MoonshinePreTrainedModel",
    "MoonshineForConditionalGeneration",
]