
"""KOSMOS-2 model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class Kosmos2TextConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`Kosmos2TextModel`]. It is used to instantiate a
KOSMOS-2 text decoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the text decoder of the KOSMOS-2
[microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    vocab_size (`int`, *optional*, defaults to 65037):
        Vocabulary size of the Kosmos2 model. Defines the number of different tokens that can be represented by the
        `input_ids` passed when calling [`Kosmos2Model`].
    max_position_embeddings (`int`, *optional*, defaults to 2048):
        The maximum sequence length that this model might ever be used with. Typically set this to something large
        just in case (e.g., 512 or 1024 or 2048).
    embed_dim (`int`, *optional*, defaults to 2048):
        Dimensionality of the layers and the pooler layer.
    layers (`int`, *optional*, defaults to 24):
        Number of hidden layers in the Transformer decoder.
    ffn_dim (`int`, *optional*, defaults to 8192):
        Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer decoder.
    attention_heads (`int`, *optional*, defaults to 32):
        Number of attention heads for each attention layer in the Transformer decoder.
    activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"silu"` and `"gelu_new"` are supported.
    dropout (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
    attention_dropout (`float`, *optional*, defaults to 0.1):
        The dropout ratio for the attention probabilities.
    activation_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for activations inside the fully connected layer.
    layerdrop (`float`, *optional*, defaults to 0.0):
        The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
        for more details.
    layer_norm_eps (`float`, *optional*, defaults to 1e-05):
        The epsilon used by the layer normalization layers.
    init_std (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    scale_embedding (`bool`, *optional*, defaults to `True`):
        Whether to scale the embeddings by the square root of `embed_dim`.
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/values attentions (not used by all models).
    pad_token_id (`int`, *optional*, defaults to 1):
        Token id used for padding.
    bos_token_id (`int`, *optional*, defaults to 0):
        Token id used for beginning of string.
    eos_token_id (`int`, *optional*, defaults to 2):
        Token id used for end of string.
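
Example (a minimal usage sketch; the direct submodule import is shown only for illustration):

```python
>>> from transformers.models.kosmos2.configuration_kosmos2 import Kosmos2TextConfig

>>> # Initializing a text configuration with the documented defaults
>>> configuration = Kosmos2TextConfig()

>>> # Overriding a few of the arguments described above
>>> configuration = Kosmos2TextConfig(layers=12, ffn_dim=4096, dropout=0.0)

>>> # Accessing one of the resulting values
>>> configuration.embed_dim
2048
```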
    """

    model_type = "kosmos_2_text_model"
    base_config_key = "text_config"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "embed_dim",
        "num_hidden_layers": "layers",
    }

    def __init__(
        self,
        vocab_size=65037,
        max_position_embeddings=2048,
        embed_dim=2048,
        layers=24,
        ffn_dim=8192,
        attention_heads=32,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        layer_norm_eps=1e-5,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.embed_dim = embed_dim
        self.layers = layers
        self.ffn_dim = ffn_dim
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.init_std = init_std
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache


class Kosmos2VisionConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`Kosmos2VisionModel`]. It is used to instantiate a
KOSMOS-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the vision encoder of the KOSMOS-2
[microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    hidden_size (`int`, *optional*, defaults to 1024):
        Dimensionality of the encoder layers and the pooler layer.
    intermediate_size (`int`, *optional*, defaults to 4096):
        Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
    num_hidden_layers (`int`, *optional*, defaults to 24):
        Number of hidden layers in the Transformer encoder.
    num_attention_heads (`int`, *optional*, defaults to 16):
        Number of attention heads for each attention layer in the Transformer encoder.
    num_channels (`int`, *optional*, defaults to 3):
        The number of input channels.
    image_size (`int`, *optional*, defaults to 224):
        The size (resolution) of each image.
    patch_size (`int`, *optional*, defaults to 14):
        The size (resolution) of each patch.
    hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
    layer_norm_eps (`float`, *optional*, defaults to 1e-05):
        The epsilon used by the layer normalization layers.
    attention_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the attention probabilities.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    initializer_factor (`float`, *optional*, defaults to 1.0):
        A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
        testing).
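
Example (a minimal usage sketch; the direct submodule import is shown only for illustration):

```python
>>> from transformers.models.kosmos2.configuration_kosmos2 import Kosmos2VisionConfig

>>> # Initializing a vision configuration with the documented defaults
>>> configuration = Kosmos2VisionConfig()

>>> # A smaller variant, overriding some of the arguments described above
>>> configuration = Kosmos2VisionConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)

>>> # Accessing one of the resulting values
>>> configuration.patch_size
14
```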
    """

    model_type = "kosmos_2_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=1024,
        intermediate_size=4096,
        num_hidden_layers=24,
        num_attention_heads=16,
        num_channels=3,
        image_size=224,
        patch_size=14,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act


class Kosmos2Config(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`Kosmos2Model`]. It is used to instantiate a
KOSMOS-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the KOSMOS-2
[microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture.

Args:
    text_config (`dict`, *optional*):
        Dictionary of configuration options used to initialize [`Kosmos2TextConfig`].
    vision_config (`dict`, *optional*):
        Dictionary of configuration options used to initialize [`Kosmos2VisionConfig`].
    latent_query_num (`int`, *optional*, defaults to 64):
        The number of latent query tokens that represent the image features used in the text decoder component.
    kwargs (*optional*):
        Dictionary of keyword arguments.

Example:

```python
>>> from transformers import Kosmos2Config, Kosmos2Model

>>> # Initializing a Kosmos-2 kosmos-2-patch14-224 style configuration
>>> configuration = Kosmos2Config()

>>> # Initializing a model (with random weights) from the kosmos-2-patch14-224 style configuration
>>> model = Kosmos2Model(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
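
>>> # A sketch of composing a configuration from sub-config dicts; the keys follow the
>>> # `Kosmos2TextConfig` and `Kosmos2VisionConfig` arguments documented above
>>> text_config = {"layers": 12, "embed_dim": 1024}
>>> vision_config = {"num_hidden_layers": 12, "hidden_size": 768}
>>> custom_configuration = Kosmos2Config(
...     text_config=text_config, vision_config=vision_config, latent_query_num=64
... )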
```zkosmos-2)r	   rD   c                    > [         TU ]  " S0 UD6  Uc  0 n[        R                  S5        Uc  0 n[        R                  S5        [	        S0 UD6U l        [        S0 UD6U l        X0l        g )NzR`text_config` is `None`. Initializing the `Kosmos2TextConfig` with default values.zV`vision_config` is `None`. Initializing the `Kosmos2VisionConfig` with default values.r   )	r   r   loggerinfor   r	   rA   rD   latent_query_num)r$   r	   rD   rX   r%   r&   s        r'   r   Kosmos2Config.__init__   sk     	"6"KKKlm MKKpq,;{;0A=A 0r)   )rX   r	   rD   )NN@   )r4   r5   r6   r7   r8   r9   r   rA   sub_configsr   r=   r>   r?   s   @r'   rS   rS      s/    > J"3FYZK 	1 1r)   rS   N)r8   configuration_utilsr   utilsr   
get_loggerr4   rV   r   rA   rS   __all__r   r)   r'   <module>r`      sZ    # 3  
		H	%i#( i#XG%* G%T71$ 71t 
r)   