
"""Wav2Vec2 model configuration"""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class Wav2Vec2Config(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`Wav2Vec2Model`]. It is used to instantiate an
Wav2Vec2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Wav2Vec2
[facebook/wav2vec2-base-960h](https://huggingface.co/facebook/wav2vec2-base-960h) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

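A configuration can also be saved to disk and reloaded through the [`PretrainedConfig`] API it inherits. The
snippet below is only an illustration added here (the local directory name is a placeholder):

```python
>>> from transformers import Wav2Vec2Config

>>> # Round-trip the default configuration through `save_pretrained`/`from_pretrained`
>>> # ("./wav2vec2-config" is an arbitrary local path used for illustration).
>>> config = Wav2Vec2Config()
>>> config.save_pretrained("./wav2vec2-config")
>>> reloaded_config = Wav2Vec2Config.from_pretrained("./wav2vec2-config")
>>> reloaded_config.hidden_size
768
```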

Args:
    vocab_size (`int`, *optional*, defaults to 32):
        Vocabulary size of the Wav2Vec2 model. Defines the number of different tokens that can be represented by
        the `inputs_ids` passed when calling [`Wav2Vec2Model`] or [`TFWav2Vec2Model`].
    hidden_size (`int`, *optional*, defaults to 768):
        Dimensionality of the encoder layers and the pooler layer.
    num_hidden_layers (`int`, *optional*, defaults to 12):
        Number of hidden layers in the Transformer encoder.
    num_attention_heads (`int`, *optional*, defaults to 12):
        Number of attention heads for each attention layer in the Transformer encoder.
    intermediate_size (`int`, *optional*, defaults to 3072):
        Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
    hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"selu"` and `"gelu_new"` are supported.
    hidden_dropout (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
    activation_dropout (`float`, *optional*, defaults to 0.1):
        The dropout ratio for activations inside the fully connected layer.
    attention_dropout (`float`, *optional*, defaults to 0.1):
        The dropout ratio for the attention probabilities.
    final_dropout (`float`, *optional*, defaults to 0.1):
        The dropout probability for the final projection layer of [`Wav2Vec2ForCTC`].
    layerdrop (`float`, *optional*, defaults to 0.1):
        The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
        details.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    layer_norm_eps (`float`, *optional*, defaults to 1e-5):
        The epsilon used by the layer normalization layers.
    feat_extract_norm (`str`, *optional*, defaults to `"group"`):
        The norm to be applied to the 1D convolutional layers of the feature encoder. One of `"group"` for group
        normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
        convolutional layers.
    feat_proj_dropout (`float`, *optional*, defaults to 0.0):
        The dropout probability for the output of the feature encoder.
    feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the 1D convolutional layers of the feature
        extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
    conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
        A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
        feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
    conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
        A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
        of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
    conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
        A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
        length of *conv_kernel* defines the number of convolutional layers and has to match the length of
        *conv_dim*.
    conv_bias (`bool`, *optional*, defaults to `False`):
        Whether the 1D convolutional layers have a bias.
    num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
        Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
        embeddings layer.
    num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
        Number of groups of 1D convolutional positional embeddings layer.
    do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
        Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is True`
        corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is False`
        corresponds to applying layer norm after the attention layer.
    apply_spec_augment (`bool`, *optional*, defaults to `True`):
        Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
        [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
        Recognition](https://arxiv.org/abs/1904.08779).
    mask_time_prob (`float`, *optional*, defaults to 0.05):
        Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
        procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis (see
        the sketch after this argument list). If reasoning from the probability of each feature vector to be chosen
        as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`.
        Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if
        `apply_spec_augment is True`.
    mask_time_length (`int`, *optional*, defaults to 10):
        Length of vector span along the time axis.
    mask_time_min_masks (`int`, *optional*, defaults to 2):
        The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
        irrespective of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length <
        mask_time_min_masks`.
    mask_feature_prob (`float`, *optional*, defaults to 0.0):
        Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
        masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks over
        the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
        span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
        may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
        True`.
    mask_feature_length (`int`, *optional*, defaults to 10):
        Length of vector span along the feature axis.
    mask_feature_min_masks (`int`, *optional*, defaults to 0):
        The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
        step, irrespective of `mask_feature_prob`. Only relevant if
        `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
    num_codevectors_per_group (`int`, *optional*, defaults to 320):
        Number of entries in each quantization codebook (group).
    num_codevector_groups (`int`, *optional*, defaults to 2):
        Number of codevector groups for product codevector quantization.
    contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
        The temperature *kappa* in the contrastive loss.
    feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
        The dropout probability for the output of the feature encoder that's used by the quantizer.
    num_negatives (`int`, *optional*, defaults to 100):
        Number of negative samples for the contrastive loss.
    codevector_dim (`int`, *optional*, defaults to 256):
        Dimensionality of the quantized feature vectors.
    proj_codevector_dim (`int`, *optional*, defaults to 256):
        Dimensionality of the final projection of both the quantized and the transformer features.
    diversity_loss_weight (`float`, *optional*, defaults to 0.1):
        The weight of the codebook diversity loss component.
    ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
        Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
        instance of [`Wav2Vec2ForCTC`].
    ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
        Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
        occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
        of [`Wav2Vec2ForCTC`].
    use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
        Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
        instance of [`Wav2Vec2ForSequenceClassification`].
    classifier_proj_size (`int`, *optional*, defaults to 256):
        Dimensionality of the projection before token mean-pooling for classification.
    tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
        A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
        module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
    tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
        A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
        *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
    tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
        A tuple of integers defining the dilation factor of each 1D convolutional layer in the *TDNN* module of the
        *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
    xvector_output_dim (`int`, *optional*, defaults to 512):
        Dimensionality of the *XVector* embedding vectors.
    add_adapter (`bool`, *optional*, defaults to `False`):
        Whether a convolutional network should be stacked on top of the Wav2Vec2 Encoder. Can be very useful for
        warm-starting Wav2Vec2 for SpeechEncoderDecoder models.
    adapter_kernel_size (`int`, *optional*, defaults to 3):
        Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
    adapter_stride (`int`, *optional*, defaults to 2):
        Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
    num_adapter_layers (`int`, *optional*, defaults to 3):
        Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
        True`.
    adapter_attn_dim (`int`, *optional*):
        Dimension of the attention adapter weights to be used in each attention block. An example of a model using
        attention adapters is [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all).
    output_hidden_size (`int`, *optional*):
        Dimensionality of the encoder output layer. If not defined, this defaults to `hidden_size`. Only relevant
        if `add_adapter is True`.
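
For intuition, the sketch below works through the mask-count arithmetic referenced in the *mask_time_prob* and
*mask_time_min_masks* descriptions above. The helper function is purely illustrative and is not the library's
internal masking implementation:

```python
>>> # Illustrative only: approximate number of time-axis SpecAugment masks for a given
>>> # number of feature-encoder frames, using the defaults documented above.
>>> def approx_num_time_masks(num_frames, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2):
...     return max(int(mask_time_prob * num_frames / mask_time_length), mask_time_min_masks)

>>> approx_num_time_masks(1000)  # 0.05 * 1000 / 10 = 5 masks, each spanning 10 frames
5
```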

Example:

```python
>>> from transformers import Wav2Vec2Config, Wav2Vec2Model

>>> # Initializing a Wav2Vec2 facebook/wav2vec2-base-960h style configuration
>>> configuration = Wav2Vec2Config()

>>> # Initializing a model (with random weights) from the facebook/wav2vec2-base-960h style configuration
>>> model = Wav2Vec2Model(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
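
>>> # Illustrative extension of the example above: overriding defaults and inspecting the
>>> # feature-encoder downsampling ratio (the product of the default convolutional strides).
>>> custom_configuration = Wav2Vec2Config(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)
>>> custom_configuration.hidden_size
1024

>>> configuration.inputs_to_logits_ratio
320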
```"""

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 3, 3),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)


__all__ = ["Wav2Vec2Config"]