
"""AltCLIP model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class AltCLIPTextConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`AltCLIPTextModel`]. It is used to instantiate a
AltCLIP text model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the AltCLIP
[BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.


Args:
    vocab_size (`int`, *optional*, defaults to 250002):
        Vocabulary size of the AltCLIP model. Defines the number of different tokens that can be represented by the
        `inputs_ids` passed when calling [`AltCLIPTextModel`].
    hidden_size (`int`, *optional*, defaults to 1024):
        Dimensionality of the encoder layers and the pooler layer.
    num_hidden_layers (`int`, *optional*, defaults to 24):
        Number of hidden layers in the Transformer encoder.
    num_attention_heads (`int`, *optional*, defaults to 16):
        Number of attention heads for each attention layer in the Transformer encoder.
    intermediate_size (`int`, *optional*, defaults to 4096):
        Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
    hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"silu"` and `"gelu_new"` are supported.
    hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
    attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout ratio for the attention probabilities.
    max_position_embeddings (`int`, *optional*, defaults to 514):
        The maximum sequence length that this model might ever be used with. Typically set this to something large
        just in case (e.g., 512 or 1024 or 2048).
    type_vocab_size (`int`, *optional*, defaults to 1):
        The vocabulary size of the `token_type_ids` passed when calling [`AltCLIPTextModel`]
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    initializer_factor (`float`, *optional*, defaults to 0.02):
        A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
        testing).
    layer_norm_eps (`float`, *optional*, defaults to 1e-05):
        The epsilon used by the layer normalization layers.
    pad_token_id (`int`, *optional*, defaults to 1): The id of the *padding* token.
    bos_token_id (`int`, *optional*, defaults to 0): The id of the *beginning-of-sequence* token.
    eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2):
        The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
    position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
        Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
        positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
        [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
        For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
        with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/values attentions (not used by all models). Only
        relevant if `config.is_decoder=True`.
    project_dim (`int`, *optional*, defaults to 768):
        The dimensions of the teacher model before the mapping layer.

Examples:

```python
>>> from transformers import AltCLIPTextModel, AltCLIPTextConfig

>>> # Initializing an AltCLIPTextConfig with BAAI/AltCLIP style configuration
>>> configuration = AltCLIPTextConfig()

>>> # Initializing an AltCLIPTextModel (with random weights) from the BAAI/AltCLIP style configuration
>>> model = AltCLIPTextModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
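
>>> # Illustrative sketch (hypothetical values, not a released checkpoint): any of the
>>> # documented defaults above can be overridden at construction time.
>>> custom_configuration = AltCLIPTextConfig(hidden_size=512, num_hidden_layers=6, num_attention_heads=8)
>>> custom_configuration.hidden_size
512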
```"""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an
AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the AltCLIP
[BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.


Args:
    hidden_size (`int`, *optional*, defaults to 768):
        Dimensionality of the encoder layers and the pooler layer.
    intermediate_size (`int`, *optional*, defaults to 3072):
        Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
    projection_dim (`int`, *optional*, defaults to 512):
        Dimensionality of text and vision projection layers.
    num_hidden_layers (`int`, *optional*, defaults to 12):
        Number of hidden layers in the Transformer encoder.
    num_attention_heads (`int`, *optional*, defaults to 12):
        Number of attention heads for each attention layer in the Transformer encoder.
    num_channels (`int`, *optional*, defaults to 3):
        The number of input channels.
    image_size (`int`, *optional*, defaults to 224):
        The size (resolution) of each image.
    patch_size (`int`, *optional*, defaults to 32):
        The size (resolution) of each patch.
    hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
    layer_norm_eps (`float`, *optional*, defaults to 1e-05):
        The epsilon used by the layer normalization layers.
    attention_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the attention probabilities.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    initializer_factor (`float`, *optional*, defaults to 1.0):
        A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
        testing).

Example:

```python
>>> from transformers import AltCLIPVisionConfig, AltCLIPVisionModel

>>> # Initializing an AltCLIPVisionConfig with BAAI/AltCLIP style configuration
>>> configuration = AltCLIPVisionConfig()

>>> # Initializing an AltCLIPVisionModel (with random weights) from the BAAI/AltCLIP style configuration
>>> model = AltCLIPVisionModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
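
>>> # Illustrative sketch (hypothetical values): the documented vision defaults can be
>>> # overridden as well, e.g. a larger input resolution with the same 32-pixel patches.
>>> custom_configuration = AltCLIPVisionConfig(image_size=336, patch_size=32)
>>> custom_configuration.image_size
336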
```"""

    model_type = "altclip_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act


class AltCLIPConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an
AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the AltCLIP
[BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    text_config (`dict`, *optional*):
        Dictionary of configuration options used to initialize [`AltCLIPTextConfig`].
    vision_config (`dict`, *optional*):
        Dictionary of configuration options used to initialize [`AltCLIPVisionConfig`].
    projection_dim (`int`, *optional*, defaults to 768):
        Dimensionality of text and vision projection layers.
    logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
        The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation.
    kwargs (*optional*):
        Dictionary of keyword arguments.

Example:

```python
>>> from transformers import AltCLIPConfig, AltCLIPModel

>>> # Initializing an AltCLIPConfig with BAAI/AltCLIP style configuration
>>> configuration = AltCLIPConfig()

>>> # Initializing an AltCLIPModel (with random weights) from the BAAI/AltCLIP style configuration
>>> model = AltCLIPModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config

>>> # We can also initialize an AltCLIPConfig from an AltCLIPTextConfig and an AltCLIPVisionConfig

>>> # Initializing an AltCLIPText and an AltCLIPVision configuration
>>> config_text = AltCLIPTextConfig()
>>> config_vision = AltCLIPVisionConfig()

>>> config = AltCLIPConfig.from_text_vision_configs(config_text, config_vision)
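
>>> # Illustrative sketch: the combined configuration exposes both sub-configurations; the
>>> # values shown below are simply the documented defaults.
>>> config.projection_dim
768
>>> config.text_config.hidden_size
1024
>>> config.vision_config.image_size
224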
```"""

    model_type = "altclip"
    sub_configs = {"text_config": AltCLIPTextConfig, "vision_config": AltCLIPVisionConfig}

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # The legacy `text_config_dict`/`vision_config_dict` kwargs are popped before calling
        # `super().__init__` so that they are not serialized into the saved configuration.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # When `text_config_dict` is given, its values take precedence over `text_config`; a message
        # is logged for every key on which the two disagree.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different "
                            f'values. The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from the default argument values
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. "
                            f'The value `text_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        # Same handling for the vision side.
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # Convert keys to string instead of integer.
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from the default argument values
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize "
                            f'`AltCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        r"""
        Instantiate a [`AltCLIPConfig`] (or a derived class) from altclip text model configuration and altclip vision
        model configuration.

        Returns:
            [`AltCLIPConfig`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)


__all__ = ["AltCLIPTextConfig", "AltCLIPVisionConfig", "AltCLIPConfig"]