"""CLAP model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class ClapTextConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`ClapTextModel`]. It is used to instantiate a CLAP
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the CLAP
[laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.


Args:
    vocab_size (`int`, *optional*, defaults to 50265):
        Vocabulary size of the CLAP model. Defines the number of different tokens that can be represented by the
        `input_ids` passed when calling [`ClapTextModel`].
    hidden_size (`int`, *optional*, defaults to 768):
        Dimensionality of the encoder layers and the pooler layer.
    num_hidden_layers (`int`, *optional*, defaults to 12):
        Number of hidden layers in the Transformer encoder.
    num_attention_heads (`int`, *optional*, defaults to 12):
        Number of attention heads for each attention layer in the Transformer encoder.
    intermediate_size (`int`, *optional*, defaults to 3072):
        Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
    hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"silu"` and `"gelu_new"` are supported.
    hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
    attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout ratio for the attention probabilities.
    max_position_embeddings (`int`, *optional*, defaults to 514):
        The maximum sequence length that this model might ever be used with. Typically set this to something large
        just in case (e.g., 512 or 1024 or 2048).
    type_vocab_size (`int`, *optional*, defaults to 1):
        The vocabulary size of the `token_type_ids` passed when calling [`ClapTextModel`].
    layer_norm_eps (`float`, *optional*, defaults to 1e-12):
        The epsilon used by the layer normalization layers.
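    initializer_factor (`float`, *optional*, defaults to 1.0):
        A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
        testing).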
    position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
        Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
        positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
        [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
        For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
        with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
    is_decoder (`bool`, *optional*, defaults to `False`):
        Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/values attentions (not used by all models). Only
        relevant if `config.is_decoder=True`.
    projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
        The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`,
        `"relu"`, `"silu"` and `"gelu_new"` are supported.
    projection_dim (`int`, *optional*, defaults to 512):
        Dimension of the projection head of the `ClapTextModelWithProjection`.

Examples:

```python
>>> from transformers import ClapTextConfig, ClapTextModel

>>> # Initializing a CLAP text configuration
>>> configuration = ClapTextConfig()

>>> # Initializing a model (with random weights) from the configuration
>>> model = ClapTextModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
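
>>> # Like any `PretrainedConfig`, defaults can be overridden with keyword arguments
>>> custom_configuration = ClapTextConfig(hidden_size=512, num_attention_heads=8)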
```"""

    model_type = "clap_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_factor=1.0,
        layer_norm_eps=1e-12,
        projection_dim=512,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        projection_hidden_act="relu",
        **kwargs,
    ):
        # Special token ids are forwarded to `PretrainedConfig` so that they are
        # serialized with the rest of the configuration.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.projection_hidden_act = projection_hidden_act
        self.projection_dim = projection_dim


class ClapAudioConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`ClapAudioModel`]. It is used to instantiate a
CLAP audio encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the audio encoder of the CLAP
[laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    window_size (`int`, *optional*, defaults to 8):
        Image size of the spectrogram.
    num_mel_bins (`int`, *optional*, defaults to 64):
        Number of mel features used per frame. Should correspond to the value used in the `ClapProcessor` class.
    spec_size (`int`, *optional*, defaults to 256):
        Desired input size of the spectrogram that the model supports. It can be different from the output of the
        `ClapFeatureExtractor`, in which case the input features will be resized. Corresponds to the `image_size`
        of the audio models.
    hidden_act (`str`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"silu"` and `"gelu_new"` are supported.
    patch_size (`int`, *optional*, defaults to 4):
        Patch size for the audio spectrogram.
    patch_stride (`list`, *optional*, defaults to `[4, 4]`):
        Patch stride for the audio spectrogram.
    num_classes (`int`, *optional*, defaults to 527):
        Number of classes used for the head training.
    hidden_size (`int`, *optional*, defaults to 768):
        Hidden size of the output of the audio encoder. Corresponds to the dimension of the penultimate layer's
        output, which is sent to the projection MLP layer.
    projection_dim (`int`, *optional*, defaults to 512):
        Hidden size of the projection layer.
    depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`):
        Depths used for the Swin layers of the audio model.
    num_attention_heads (`list`, *optional*, defaults to `[4, 8, 16, 32]`):
        Number of attention heads used for the Swin layers of the audio model.
    enable_fusion (`bool`, *optional*, defaults to `False`):
        Whether or not to enable patch fusion. This is the main contribution of the authors, and should give the
        best results.
    hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the encoder.
    fusion_type (`str`, *optional*):
        Fusion type used for the patch fusion.
    patch_embed_input_channels (`int`, *optional*, defaults to 1):
        Number of channels used for the input spectrogram.
    flatten_patch_embeds (`bool`, *optional*, defaults to `True`):
        Whether or not to flatten the patch embeddings.
    patch_embeds_hidden_size (`int`, *optional*, defaults to 96):
        Hidden size of the patch embeddings. It is used as the number of output channels.
    enable_patch_layer_norm (`bool`, *optional*, defaults to `True`):
        Whether or not to enable layer normalization for the patch embeddings.
    drop_path_rate (`float`, *optional*, defaults to 0.0):
        Drop path rate for the patch fusion.
    attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the attention probabilities.
    qkv_bias (`bool`, *optional*, defaults to `True`):
        Whether or not to add a bias to the query, key, value projections.
    mlp_ratio (`float`, *optional*, defaults to 4.0):
        Ratio of the mlp hidden dim to embedding dim.
    aff_block_r (`int`, *optional*, defaults to 4):
        Downsize ratio used in the attentional feature fusion (AFF) block.
    num_hidden_layers (`int`, *optional*, defaults to 4):
        Number of hidden layers in the Transformer encoder.
    projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
        The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`,
        `"relu"`, `"silu"` and `"gelu_new"` are supported.
    layer_norm_eps (`float`, *optional*, defaults to 1e-05):
        The epsilon used by the layer normalization layers.
    initializer_factor (`float`, *optional*, defaults to 1.0):
        A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
        testing).

Example:

```python
>>> from transformers import ClapAudioConfig, ClapAudioModel

>>> # Initializing a ClapAudioConfig with laion/clap-htsat-fused style configuration
>>> configuration = ClapAudioConfig()

>>> # Initializing a ClapAudioModel (with random weights) from the laion/clap-htsat-fused style configuration
>>> model = ClapAudioModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
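
>>> # Patch fusion (the authors' main contribution) is disabled by default and can be opted into
>>> fused_configuration = ClapAudioConfig(enable_fusion=True)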
```"""

    model_type = "clap_audio_model"
    base_config_key = "audio_config"

    def __init__(
        self,
        window_size=8,
        num_mel_bins=64,
        spec_size=256,
        hidden_act="gelu",
        patch_size=4,
        patch_stride=[4, 4],
        num_classes=527,
        hidden_size=768,
        projection_dim=512,
        depths=[2, 2, 6, 2],
        num_attention_heads=[4, 8, 16, 32],
        enable_fusion=False,
        hidden_dropout_prob=0.1,
        fusion_type=None,
        patch_embed_input_channels=1,
        flatten_patch_embeds=True,
        patch_embeds_hidden_size=96,
        enable_patch_layer_norm=True,
        drop_path_rate=0.0,
        attention_probs_dropout_prob=0.0,
        qkv_bias=True,
        mlp_ratio=4.0,
        aff_block_r=4,
        num_hidden_layers=4,
        projection_hidden_act="relu",
        layer_norm_eps=1e-5,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.window_size = window_size
        self.num_mel_bins = num_mel_bins
        self.spec_size = spec_size
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.depths = depths
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.projection_dim = projection_dim
        self.flatten_patch_embeds = flatten_patch_embeds
        self.patch_embeds_hidden_size = patch_embeds_hidden_size
        self.enable_patch_layer_norm = enable_patch_layer_norm
        self.drop_path_rate = drop_path_rate
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.qkv_bias = qkv_bias
        self.mlp_ratio = mlp_ratio
        self.patch_embed_input_channels = patch_embed_input_channels
        self.aff_block_r = aff_block_r
        self.layer_norm_eps = layer_norm_eps
        self.initializer_factor = initializer_factor
        self.projection_hidden_act = projection_hidden_act


class ClapConfig(PretrainedConfig):
    r"""
[`ClapConfig`] is the configuration class to store the configuration of a [`ClapModel`]. It is used to instantiate
a CLAP model according to the specified arguments, defining the text model and audio model configs. Instantiating a
configuration with the defaults will yield a similar configuration to that of the CLAP
[laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    text_config (`dict`, *optional*):
        Dictionary of configuration options used to initialize [`ClapTextConfig`].
    audio_config (`dict`, *optional*):
        Dictionary of configuration options used to initialize [`ClapAudioConfig`].
    logit_scale_init_value (`float`, *optional*, defaults to 14.29):
        The initial value of the *logit_scale* parameter. Default is used as per the original CLAP implementation.
    projection_dim (`int`, *optional*, defaults to 512):
        Dimensionality of text and audio projection layers.
    projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
        Activation function for the projection layers.
    initializer_factor (`float`, *optional*, defaults to 1.0):
        Factor to scale the initialization of the model weights.
    kwargs (*optional*):
        Dictionary of keyword arguments.

Example:

```python
>>> from transformers import ClapConfig, ClapModel

>>> # Initializing a ClapConfig with laion-ai/base style configuration
>>> configuration = ClapConfig()

>>> # Initializing a ClapModel (with random weights) from the laion-ai/base style configuration
>>> model = ClapModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config

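>>> # Sub-configurations can also be overridden with plain dicts of options
>>> custom_configuration = ClapConfig(text_config={"hidden_size": 512}, audio_config={"enable_fusion": True})
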
>>> # We can also initialize a ClapConfig from a ClapTextConfig and a ClapAudioConfig
>>> from transformers import ClapTextConfig, ClapAudioConfig

>>> # Initializing a ClapTextConfig and a ClapAudioConfig configuration
>>> config_text = ClapTextConfig()
>>> config_audio = ClapAudioConfig()

>>> config = ClapConfig.from_text_audio_configs(config_text, config_audio)
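
>>> # Like any `PretrainedConfig`, a ClapConfig can be saved and reloaded from disk
>>> config.save_pretrained("clap-config")  # doctest: +SKIP
>>> config = ClapConfig.from_pretrained("clap-config")  # doctest: +SKIP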
```"""

    model_type = "clap"
    sub_configs = {"text_config": ClapTextConfig, "audio_config": ClapAudioConfig}

    def __init__(
        self,
        text_config=None,
        audio_config=None,
        logit_scale_init_value=(1 / 0.07),
        projection_dim=512,
        projection_hidden_act="relu",
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the ClapTextConfig with default values.")

        if audio_config is None:
            audio_config = {}
            logger.info("audio_config is None. Initializing the ClapAudioConfig with default values.")

        # Sub-configurations may be passed as plain dicts of options.
        self.text_config = ClapTextConfig(**text_config)
        self.audio_config = ClapAudioConfig(**audio_config)
        self.text_config.projection_dim = projection_dim
        self.audio_config.projection_dim = projection_dim

        self.text_config.projection_hidden_act = projection_hidden_act
        self.audio_config.projection_hidden_act = projection_hidden_act

        self.projection_dim = projection_dim
        self.projection_hidden_act = projection_hidden_act
        self.hidden_size = self.text_config.hidden_size

        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = initializer_factor
        self.num_hidden_layers = self.text_config.num_hidden_layers + len(self.audio_config.depths)

    @classmethod
    def from_text_audio_configs(cls, text_config: ClapTextConfig, audio_config: ClapAudioConfig, **kwargs):
        r"""
        Instantiate a [`ClapConfig`] (or a derived class) from a CLAP text model configuration and a CLAP audio
        model configuration.

        Returns:
            [`ClapConfig`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), audio_config=audio_config.to_dict(), **kwargs)


__all__ = ["ClapAudioConfig", "ClapConfig", "ClapTextConfig"]