"""Hubert model configuration"""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class HubertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`HubertModel`]. It is used to instantiate a
    Hubert model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Hubert
    [facebook/hubert-base-ls960](https://huggingface.co/facebook/hubert-base-ls960) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the Hubert model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`HubertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        final_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the final projection layer of [`Wav2Vec2ForCTC`].
        layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
            details.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        feat_extract_norm (`str`, *optional*, defaults to `"group"`):
            The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
            normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
            convolutional layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for output of the feature encoder.
        feat_proj_layer_norm (`bool`, *optional*, defaults to `True`):
            Whether to apply LayerNorm to the output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        conv_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`Tuple[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
            Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
            embeddings layer.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of 1D convolutional positional embeddings layer.
        conv_pos_batch_norm (`bool`, *optional*, defaults to `False`):
            Whether to use batch norm instead of weight norm in the convolutional positional embeddings layer.
        do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
            Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
            True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
            False` corresponds to applying layer norm after the attention layer.
        apply_spec_augment (`bool`, *optional*, defaults to `True`):
            Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
            [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
            Recognition](https://arxiv.org/abs/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates `mask_time_prob * len(time_axis) / mask_time_length` independent masks over the axis.
            If reasoning from the probability of each feature vector to be chosen as the start of the vector span to
            be masked, *mask_time_prob* should be `prob_vector_start * mask_time_length`. Note that overlap may
            decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
            See the sketch after this argument list for a worked example.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespective of `mask_time_prob`. Only relevant if `mask_time_prob * len(time_axis) / mask_time_length <
            mask_time_min_masks`.
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates `mask_feature_prob * len(feature_axis) / mask_feature_length` independent
            masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start
            of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start * mask_feature_length`.
            Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if
            `apply_spec_augment is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespective of `mask_feature_prob`. Only relevant if
            `mask_feature_prob * len(feature_axis) / mask_feature_length < mask_feature_min_masks`.
        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`HubertForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`HubertForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`HubertForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.
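
    A rough sketch of how the SpecAugment arguments above combine, for ten seconds of 16 kHz audio under the default
    values (the input length here is an assumption for illustration, not part of this configuration):

    ```python
    # 10 s at 16 kHz -> 160_000 samples. The default `conv_stride` product is
    # 5 * 2**6 = 320, so the feature encoder emits about 160_000 / 320 = 500 frames.
    sequence_length = 160_000 // 320
    mask_time_prob, mask_time_length, mask_time_min_masks = 0.05, 10, 2

    # Number of independent time masks per the formula documented above,
    # floored at `mask_time_min_masks`.
    num_time_masks = max(int(mask_time_prob * sequence_length / mask_time_length), mask_time_min_masks)
    print(num_time_masks)  # 2 masks of 10 frames each, ~4% of the frames before overlap
    ```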

    Example:

    ```python
    >>> from transformers import HubertModel, HubertConfig

    >>> # Initializing a Hubert facebook/hubert-base-ls960 style configuration
    >>> configuration = HubertConfig()

    >>> # Initializing a model from the facebook/hubert-base-ls960 style configuration
    >>> model = HubertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
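
    >>> # The feature encoder downsamples the raw waveform by the product of the
    >>> # conv strides; this is exposed as `inputs_to_logits_ratio` (5 * 2**6 = 320
    >>> # with the default `conv_stride`)
    >>> configuration.inputs_to_logits_ratio
    320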
    ```Zhuberté    é   é   é   Úgeluçš™™™™™¹?Tç        ç{®Gáz”?çñhãˆµøä>Úgroup©é   r   r   r   r   r   r   ©é   é   r   r   r   r   r   ©é
   r   r   r   r   r   r   Fé€   é   çš™™™™™©?r   r   r   Úsumé   é   c(           )         sp  t ƒ jdi |(¤|%|&|'dœ¤Ž || _|| _|| _t|ƒ| _t|ƒ| _t|ƒ| _|| _	|| _
|| _|| _t| jƒ| _|| _|| _|| _|| _|| _|	| _|| _|
| _|| _|| _|| _|| _|| _|| _|| _|#| _|$| _t| jƒ| jks„t| jƒ| jks„t| jƒ| jkr›t dt| jƒ› dt| jƒ› dt| jƒ› dƒ‚|| _!|| _"|| _#|| _$|| _%|| _&| | _'|!| _(|"| _)d S )N)Úpad_token_idÚbos_token_idÚeos_token_idzºConfiguration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = z`, `len(config.conv_stride) = z`, `len(config.conv_kernel) = z`.© )*ÚsuperÚ__init__Úhidden_sizeÚfeat_extract_normÚfeat_extract_activationÚlistÚconv_dimÚconv_strideÚconv_kernelÚ	conv_biasÚnum_conv_pos_embeddingsÚnum_conv_pos_embedding_groupsÚconv_pos_batch_normÚlenZnum_feat_extract_layersÚnum_hidden_layersÚintermediate_sizeÚ
hidden_actÚnum_attention_headsÚhidden_dropoutÚattention_dropoutÚactivation_dropoutÚfeat_proj_layer_normÚfeat_proj_dropoutÚfinal_dropoutÚ	layerdropÚlayer_norm_epsÚinitializer_rangeÚ
vocab_sizeÚdo_stable_layer_normÚuse_weighted_layer_sumÚclassifier_proj_sizeÚ
ValueErrorÚapply_spec_augmentÚmask_time_probÚmask_time_lengthÚmask_time_min_masksÚmask_feature_probÚmask_feature_lengthÚmask_feature_min_masksÚctc_loss_reductionÚctc_zero_infinity))Úselfr<   r#   r/   r2   r0   r1   r3   r5   r4   r6   r7   r8   r9   r;   r:   r$   r%   r'   r(   r)   r*   r+   r,   r-   r=   rA   rB   rC   rD   rE   rF   rG   rH   rI   r>   r?   r   r   r   Úkwargs©Ú	__class__r    ú^/var/www/auris/lib/python3.10/site-packages/transformers/models/hubert/configuration_hubert.pyr"   ¢   sd   +


þþýÿ
zHubertConfig.__init__c                 C   s   t  tj| jd¡S )Nr   )Ú	functoolsÚreduceÚoperatorÚmulr(   )rJ   r    r    rN   Úinputs_to_logits_ratio  s   z#HubertConfig.inputs_to_logits_ratio)'r   r   r   r   r	   r
   r   r   r   Tr   r   r   r   r   r   r
   r   r   r   Fr   r   FFTr   r   r   r   r   r   r   FFr   r   r   r   )	Ú__name__Ú
__module__Ú__qualname__Ú__doc__Z
model_typer"   ÚpropertyrS   Ú__classcell__r    r    rL   rN   r      s\     Øbr   )rW   rO   rQ   Zconfiguration_utilsr   Úutilsr   Z
get_loggerrT   Úloggerr   Ú__all__r    r    r    rN   Ú<module>   s   
 
o